2 * tg3.c: Broadcom Tigon3 ethernet driver.
4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6 * Copyright (C) 2004 Sun Microsystems Inc.
7 * Copyright (C) 2005-2012 Broadcom Corporation.
10 * Derived from proprietary unpublished source code,
11 * Copyright (C) 2000-2003 Broadcom Corporation.
13 * Permission is hereby granted for the distribution of this firmware
14 * data in hexadecimal or equivalent format, provided this copyright
15 * notice is accompanying it.
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/stringify.h>
22 #include <linux/kernel.h>
23 #include <linux/types.h>
24 #include <linux/compiler.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
28 #include <linux/init.h>
29 #include <linux/interrupt.h>
30 #include <linux/ioport.h>
31 #include <linux/pci.h>
32 #include <linux/netdevice.h>
33 #include <linux/etherdevice.h>
34 #include <linux/skbuff.h>
35 #include <linux/ethtool.h>
36 #include <linux/mdio.h>
37 #include <linux/mii.h>
38 #include <linux/phy.h>
39 #include <linux/brcmphy.h>
40 #include <linux/if_vlan.h>
42 #include <linux/tcp.h>
43 #include <linux/workqueue.h>
44 #include <linux/prefetch.h>
45 #include <linux/dma-mapping.h>
46 #include <linux/firmware.h>
47 #include <linux/hwmon.h>
48 #include <linux/hwmon-sysfs.h>
50 #include <net/checksum.h>
54 #include <asm/byteorder.h>
55 #include <linux/uaccess.h>
57 #include <uapi/linux/net_tstamp.h>
58 #include <linux/ptp_clock_kernel.h>
61 #include <asm/idprom.h>
70 /* Functions & macros to verify TG3_FLAGS types */
72 static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
74 return test_bit(flag, bits);
77 static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
82 static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
84 clear_bit(flag, bits);
87 #define tg3_flag(tp, flag) \
88 _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
89 #define tg3_flag_set(tp, flag) \
90 _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
91 #define tg3_flag_clear(tp, flag) \
92 _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
94 #define DRV_MODULE_NAME "tg3"
96 #define TG3_MIN_NUM 128
97 #define DRV_MODULE_VERSION \
98 __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
99 #define DRV_MODULE_RELDATE "December 03, 2012"
101 #define RESET_KIND_SHUTDOWN 0
102 #define RESET_KIND_INIT 1
103 #define RESET_KIND_SUSPEND 2
105 #define TG3_DEF_RX_MODE 0
106 #define TG3_DEF_TX_MODE 0
107 #define TG3_DEF_MSG_ENABLE \
117 #define TG3_GRC_LCLCTL_PWRSW_DELAY 100
119 /* length of time before we decide the hardware is borked,
120 * and dev->tx_timeout() should be called to fix the problem
123 #define TG3_TX_TIMEOUT (5 * HZ)
125 /* hardware minimum and maximum for a single frame's data payload */
126 #define TG3_MIN_MTU 60
127 #define TG3_MAX_MTU(tp) \
128 (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
130 /* These numbers seem to be hard coded in the NIC firmware somehow.
131 * You can't change the ring sizes, but you can change where you place
132 * them in the NIC onboard memory.
134 #define TG3_RX_STD_RING_SIZE(tp) \
135 (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
136 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
137 #define TG3_DEF_RX_RING_PENDING 200
138 #define TG3_RX_JMB_RING_SIZE(tp) \
139 (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
140 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
141 #define TG3_DEF_RX_JUMBO_RING_PENDING 100
143 /* Do not place this n-ring entries value into the tp struct itself,
144 * we really want to expose these constants to GCC so that modulo et
145 * al. operations are done with shifts and masks instead of with
146 * hw multiply/modulo instructions. Another solution would be to
147 * replace things like '% foo' with '& (foo - 1)'.
150 #define TG3_TX_RING_SIZE 512
151 #define TG3_DEF_TX_RING_PENDING (TG3_TX_RING_SIZE - 1)
153 #define TG3_RX_STD_RING_BYTES(tp) \
154 (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
155 #define TG3_RX_JMB_RING_BYTES(tp) \
156 (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
157 #define TG3_RX_RCB_RING_BYTES(tp) \
158 (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
159 #define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \
161 #define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
163 #define TG3_DMA_BYTE_ENAB 64
165 #define TG3_RX_STD_DMA_SZ 1536
166 #define TG3_RX_JMB_DMA_SZ 9046
168 #define TG3_RX_DMA_TO_MAP_SZ(x) ((x) + TG3_DMA_BYTE_ENAB)
170 #define TG3_RX_STD_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
171 #define TG3_RX_JMB_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
173 #define TG3_RX_STD_BUFF_RING_SIZE(tp) \
174 (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))
176 #define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
177 (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
179 /* Due to a hardware bug, the 5701 can only DMA to memory addresses
180 * that are at least dword aligned when used in PCIX mode. The driver
181 * works around this bug by double copying the packet. This workaround
182 * is built into the normal double copy length check for efficiency.
184 * However, the double copy is only necessary on those architectures
185 * where unaligned memory accesses are inefficient. For those architectures
186 * where unaligned memory accesses incur little penalty, we can reintegrate
187 * the 5701 in the normal rx path. Doing so saves a device structure
188 * dereference by hardcoding the double copy threshold in place.
190 #define TG3_RX_COPY_THRESHOLD 256
191 #if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
192 #define TG3_RX_COPY_THRESH(tp) TG3_RX_COPY_THRESHOLD
194 #define TG3_RX_COPY_THRESH(tp) ((tp)->rx_copy_thresh)
197 #if (NET_IP_ALIGN != 0)
198 #define TG3_RX_OFFSET(tp) ((tp)->rx_offset)
200 #define TG3_RX_OFFSET(tp) (NET_SKB_PAD)
203 /* minimum number of free TX descriptors required to wake up TX process */
204 #define TG3_TX_WAKEUP_THRESH(tnapi) ((tnapi)->tx_pending / 4)
205 #define TG3_TX_BD_DMA_MAX_2K 2048
206 #define TG3_TX_BD_DMA_MAX_4K 4096
208 #define TG3_RAW_IP_ALIGN 2
210 #define TG3_FW_UPDATE_TIMEOUT_SEC 5
211 #define TG3_FW_UPDATE_FREQ_SEC (TG3_FW_UPDATE_TIMEOUT_SEC / 2)
213 #define FIRMWARE_TG3 "tigon/tg3.bin"
214 #define FIRMWARE_TG3TSO "tigon/tg3_tso.bin"
215 #define FIRMWARE_TG3TSO5 "tigon/tg3_tso5.bin"
217 static char version[] =
218 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
220 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
221 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
222 MODULE_LICENSE("GPL");
223 MODULE_VERSION(DRV_MODULE_VERSION);
224 MODULE_FIRMWARE(FIRMWARE_TG3);
225 MODULE_FIRMWARE(FIRMWARE_TG3TSO);
226 MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
228 static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
229 module_param(tg3_debug, int, 0);
230 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
232 #define TG3_DRV_DATA_FLAG_10_100_ONLY 0x0001
233 #define TG3_DRV_DATA_FLAG_5705_10_100 0x0002
235 static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
236 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
237 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
238 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
239 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
240 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
241 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
242 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
243 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
244 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
245 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
246 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
247 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
248 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
249 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
250 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
251 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
252 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
253 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
254 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
255 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
256 TG3_DRV_DATA_FLAG_5705_10_100},
257 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
258 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
259 TG3_DRV_DATA_FLAG_5705_10_100},
260 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
261 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
262 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
263 TG3_DRV_DATA_FLAG_5705_10_100},
264 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
265 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
266 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
267 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
268 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
269 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
270 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
271 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
272 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
273 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
274 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
275 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
276 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
277 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
278 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
279 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
280 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
281 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
282 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
283 {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
284 PCI_VENDOR_ID_LENOVO,
285 TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
286 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
287 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
288 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
289 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
290 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
291 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
292 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
293 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
294 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
295 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
296 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
297 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
298 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
299 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
300 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
301 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
302 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
303 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
304 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
305 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
306 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
307 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
308 {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
309 PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
310 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
311 {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
312 PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
313 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
314 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
315 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
316 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
317 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
318 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
319 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
320 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
321 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
322 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
323 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
324 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
325 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
326 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
327 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
328 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
329 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
330 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
331 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
332 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
333 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
334 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
335 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
336 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
337 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
338 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
339 {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
340 {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
344 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
346 static const struct {
347 const char string[ETH_GSTRING_LEN];
348 } ethtool_stats_keys[] = {
351 { "rx_ucast_packets" },
352 { "rx_mcast_packets" },
353 { "rx_bcast_packets" },
355 { "rx_align_errors" },
356 { "rx_xon_pause_rcvd" },
357 { "rx_xoff_pause_rcvd" },
358 { "rx_mac_ctrl_rcvd" },
359 { "rx_xoff_entered" },
360 { "rx_frame_too_long_errors" },
362 { "rx_undersize_packets" },
363 { "rx_in_length_errors" },
364 { "rx_out_length_errors" },
365 { "rx_64_or_less_octet_packets" },
366 { "rx_65_to_127_octet_packets" },
367 { "rx_128_to_255_octet_packets" },
368 { "rx_256_to_511_octet_packets" },
369 { "rx_512_to_1023_octet_packets" },
370 { "rx_1024_to_1522_octet_packets" },
371 { "rx_1523_to_2047_octet_packets" },
372 { "rx_2048_to_4095_octet_packets" },
373 { "rx_4096_to_8191_octet_packets" },
374 { "rx_8192_to_9022_octet_packets" },
381 { "tx_flow_control" },
383 { "tx_single_collisions" },
384 { "tx_mult_collisions" },
386 { "tx_excessive_collisions" },
387 { "tx_late_collisions" },
388 { "tx_collide_2times" },
389 { "tx_collide_3times" },
390 { "tx_collide_4times" },
391 { "tx_collide_5times" },
392 { "tx_collide_6times" },
393 { "tx_collide_7times" },
394 { "tx_collide_8times" },
395 { "tx_collide_9times" },
396 { "tx_collide_10times" },
397 { "tx_collide_11times" },
398 { "tx_collide_12times" },
399 { "tx_collide_13times" },
400 { "tx_collide_14times" },
401 { "tx_collide_15times" },
402 { "tx_ucast_packets" },
403 { "tx_mcast_packets" },
404 { "tx_bcast_packets" },
405 { "tx_carrier_sense_errors" },
409 { "dma_writeq_full" },
410 { "dma_write_prioq_full" },
414 { "rx_threshold_hit" },
416 { "dma_readq_full" },
417 { "dma_read_prioq_full" },
418 { "tx_comp_queue_full" },
420 { "ring_set_send_prod_index" },
421 { "ring_status_update" },
423 { "nic_avoided_irqs" },
424 { "nic_tx_threshold_hit" },
426 { "mbuf_lwm_thresh_hit" },
429 #define TG3_NUM_STATS ARRAY_SIZE(ethtool_stats_keys)
430 #define TG3_NVRAM_TEST 0
431 #define TG3_LINK_TEST 1
432 #define TG3_REGISTER_TEST 2
433 #define TG3_MEMORY_TEST 3
434 #define TG3_MAC_LOOPB_TEST 4
435 #define TG3_PHY_LOOPB_TEST 5
436 #define TG3_EXT_LOOPB_TEST 6
437 #define TG3_INTERRUPT_TEST 7
440 static const struct {
441 const char string[ETH_GSTRING_LEN];
442 } ethtool_test_keys[] = {
443 [TG3_NVRAM_TEST] = { "nvram test (online) " },
444 [TG3_LINK_TEST] = { "link test (online) " },
445 [TG3_REGISTER_TEST] = { "register test (offline)" },
446 [TG3_MEMORY_TEST] = { "memory test (offline)" },
447 [TG3_MAC_LOOPB_TEST] = { "mac loopback test (offline)" },
448 [TG3_PHY_LOOPB_TEST] = { "phy loopback test (offline)" },
449 [TG3_EXT_LOOPB_TEST] = { "ext loopback test (offline)" },
450 [TG3_INTERRUPT_TEST] = { "interrupt test (offline)" },
453 #define TG3_NUM_TEST ARRAY_SIZE(ethtool_test_keys)
456 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
458 writel(val, tp->regs + off);
461 static u32 tg3_read32(struct tg3 *tp, u32 off)
463 return readl(tp->regs + off);
466 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
468 writel(val, tp->aperegs + off);
471 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
473 return readl(tp->aperegs + off);
476 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
480 spin_lock_irqsave(&tp->indirect_lock, flags);
481 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
482 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
483 spin_unlock_irqrestore(&tp->indirect_lock, flags);
486 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
488 writel(val, tp->regs + off);
489 readl(tp->regs + off);
492 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
497 spin_lock_irqsave(&tp->indirect_lock, flags);
498 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
499 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
500 spin_unlock_irqrestore(&tp->indirect_lock, flags);
504 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
508 if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
509 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
510 TG3_64BIT_REG_LOW, val);
513 if (off == TG3_RX_STD_PROD_IDX_REG) {
514 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
515 TG3_64BIT_REG_LOW, val);
519 spin_lock_irqsave(&tp->indirect_lock, flags);
520 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
521 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
522 spin_unlock_irqrestore(&tp->indirect_lock, flags);
524 /* In indirect mode when disabling interrupts, we also need
525 * to clear the interrupt bit in the GRC local ctrl register.
527 if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
529 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
530 tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
534 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
539 spin_lock_irqsave(&tp->indirect_lock, flags);
540 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
541 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
542 spin_unlock_irqrestore(&tp->indirect_lock, flags);
546 /* usec_wait specifies the wait time in usec when writing to certain registers
547 * where it is unsafe to read back the register without some delay.
548 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
549 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
551 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
553 if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
554 /* Non-posted methods */
555 tp->write32(tp, off, val);
558 tg3_write32(tp, off, val);
563 /* Wait again after the read for the posted method to guarantee that
564 * the wait time is met.
570 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
572 tp->write32_mbox(tp, off, val);
573 if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
574 tp->read32_mbox(tp, off);
577 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
579 void __iomem *mbox = tp->regs + off;
581 if (tg3_flag(tp, TXD_MBOX_HWBUG))
583 if (tg3_flag(tp, MBOX_WRITE_REORDER))
587 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
589 return readl(tp->regs + off + GRCMBOX_BASE);
592 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
594 writel(val, tp->regs + off + GRCMBOX_BASE);
597 #define tw32_mailbox(reg, val) tp->write32_mbox(tp, reg, val)
598 #define tw32_mailbox_f(reg, val) tw32_mailbox_flush(tp, (reg), (val))
599 #define tw32_rx_mbox(reg, val) tp->write32_rx_mbox(tp, reg, val)
600 #define tw32_tx_mbox(reg, val) tp->write32_tx_mbox(tp, reg, val)
601 #define tr32_mailbox(reg) tp->read32_mbox(tp, reg)
603 #define tw32(reg, val) tp->write32(tp, reg, val)
604 #define tw32_f(reg, val) _tw32_flush(tp, (reg), (val), 0)
605 #define tw32_wait_f(reg, val, us) _tw32_flush(tp, (reg), (val), (us))
606 #define tr32(reg) tp->read32(tp, reg)
608 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
612 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
613 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
616 spin_lock_irqsave(&tp->indirect_lock, flags);
617 if (tg3_flag(tp, SRAM_USE_CONFIG)) {
618 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
619 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
621 /* Always leave this as zero. */
622 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
624 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
625 tw32_f(TG3PCI_MEM_WIN_DATA, val);
627 /* Always leave this as zero. */
628 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
630 spin_unlock_irqrestore(&tp->indirect_lock, flags);
633 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
637 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
638 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
643 spin_lock_irqsave(&tp->indirect_lock, flags);
644 if (tg3_flag(tp, SRAM_USE_CONFIG)) {
645 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
646 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
648 /* Always leave this as zero. */
649 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
651 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
652 *val = tr32(TG3PCI_MEM_WIN_DATA);
654 /* Always leave this as zero. */
655 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
657 spin_unlock_irqrestore(&tp->indirect_lock, flags);
660 static void tg3_ape_lock_init(struct tg3 *tp)
665 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
666 regbase = TG3_APE_LOCK_GRANT;
668 regbase = TG3_APE_PER_LOCK_GRANT;
670 /* Make sure the driver hasn't any stale locks. */
671 for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
673 case TG3_APE_LOCK_PHY0:
674 case TG3_APE_LOCK_PHY1:
675 case TG3_APE_LOCK_PHY2:
676 case TG3_APE_LOCK_PHY3:
677 bit = APE_LOCK_GRANT_DRIVER;
681 bit = APE_LOCK_GRANT_DRIVER;
683 bit = 1 << tp->pci_fn;
685 tg3_ape_write32(tp, regbase + 4 * i, bit);
690 static int tg3_ape_lock(struct tg3 *tp, int locknum)
694 u32 status, req, gnt, bit;
696 if (!tg3_flag(tp, ENABLE_APE))
700 case TG3_APE_LOCK_GPIO:
701 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
703 case TG3_APE_LOCK_GRC:
704 case TG3_APE_LOCK_MEM:
706 bit = APE_LOCK_REQ_DRIVER;
708 bit = 1 << tp->pci_fn;
710 case TG3_APE_LOCK_PHY0:
711 case TG3_APE_LOCK_PHY1:
712 case TG3_APE_LOCK_PHY2:
713 case TG3_APE_LOCK_PHY3:
714 bit = APE_LOCK_REQ_DRIVER;
720 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
721 req = TG3_APE_LOCK_REQ;
722 gnt = TG3_APE_LOCK_GRANT;
724 req = TG3_APE_PER_LOCK_REQ;
725 gnt = TG3_APE_PER_LOCK_GRANT;
730 tg3_ape_write32(tp, req + off, bit);
732 /* Wait for up to 1 millisecond to acquire lock. */
733 for (i = 0; i < 100; i++) {
734 status = tg3_ape_read32(tp, gnt + off);
741 /* Revoke the lock request. */
742 tg3_ape_write32(tp, gnt + off, bit);
749 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
753 if (!tg3_flag(tp, ENABLE_APE))
757 case TG3_APE_LOCK_GPIO:
758 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
760 case TG3_APE_LOCK_GRC:
761 case TG3_APE_LOCK_MEM:
763 bit = APE_LOCK_GRANT_DRIVER;
765 bit = 1 << tp->pci_fn;
767 case TG3_APE_LOCK_PHY0:
768 case TG3_APE_LOCK_PHY1:
769 case TG3_APE_LOCK_PHY2:
770 case TG3_APE_LOCK_PHY3:
771 bit = APE_LOCK_GRANT_DRIVER;
777 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
778 gnt = TG3_APE_LOCK_GRANT;
780 gnt = TG3_APE_PER_LOCK_GRANT;
782 tg3_ape_write32(tp, gnt + 4 * locknum, bit);
785 static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
790 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
793 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
794 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
797 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
800 timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
803 return timeout_us ? 0 : -EBUSY;
806 static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
810 for (i = 0; i < timeout_us / 10; i++) {
811 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
813 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
819 return i == timeout_us / 10;
822 static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
826 u32 i, bufoff, msgoff, maxlen, apedata;
828 if (!tg3_flag(tp, APE_HAS_NCSI))
831 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
832 if (apedata != APE_SEG_SIG_MAGIC)
835 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
836 if (!(apedata & APE_FW_STATUS_READY))
839 bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
841 msgoff = bufoff + 2 * sizeof(u32);
842 maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);
847 /* Cap xfer sizes to scratchpad limits. */
848 length = (len > maxlen) ? maxlen : len;
851 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
852 if (!(apedata & APE_FW_STATUS_READY))
855 /* Wait for up to 1 msec for APE to service previous event. */
856 err = tg3_ape_event_lock(tp, 1000);
860 apedata = APE_EVENT_STATUS_DRIVER_EVNT |
861 APE_EVENT_STATUS_SCRTCHPD_READ |
862 APE_EVENT_STATUS_EVENT_PENDING;
863 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);
865 tg3_ape_write32(tp, bufoff, base_off);
866 tg3_ape_write32(tp, bufoff + sizeof(u32), length);
868 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
869 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
873 if (tg3_ape_wait_for_event(tp, 30000))
876 for (i = 0; length; i += 4, length -= 4) {
877 u32 val = tg3_ape_read32(tp, msgoff + i);
878 memcpy(data, &val, sizeof(u32));
886 static int tg3_ape_send_event(struct tg3 *tp, u32 event)
891 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
892 if (apedata != APE_SEG_SIG_MAGIC)
895 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
896 if (!(apedata & APE_FW_STATUS_READY))
899 /* Wait for up to 1 millisecond for APE to service previous event. */
900 err = tg3_ape_event_lock(tp, 1000);
904 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
905 event | APE_EVENT_STATUS_EVENT_PENDING);
907 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
908 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
913 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
918 if (!tg3_flag(tp, ENABLE_APE))
922 case RESET_KIND_INIT:
923 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
924 APE_HOST_SEG_SIG_MAGIC);
925 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
926 APE_HOST_SEG_LEN_MAGIC);
927 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
928 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
929 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
930 APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
931 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
932 APE_HOST_BEHAV_NO_PHYLOCK);
933 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
934 TG3_APE_HOST_DRVR_STATE_START);
936 event = APE_EVENT_STATUS_STATE_START;
938 case RESET_KIND_SHUTDOWN:
939 /* With the interface we are currently using,
940 * APE does not track driver state. Wiping
941 * out the HOST SEGMENT SIGNATURE forces
942 * the APE to assume OS absent status.
944 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
946 if (device_may_wakeup(&tp->pdev->dev) &&
947 tg3_flag(tp, WOL_ENABLE)) {
948 tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
949 TG3_APE_HOST_WOL_SPEED_AUTO);
950 apedata = TG3_APE_HOST_DRVR_STATE_WOL;
952 apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;
954 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);
956 event = APE_EVENT_STATUS_STATE_UNLOAD;
958 case RESET_KIND_SUSPEND:
959 event = APE_EVENT_STATUS_STATE_SUSPEND;
965 event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
967 tg3_ape_send_event(tp, event);
970 static void tg3_disable_ints(struct tg3 *tp)
974 tw32(TG3PCI_MISC_HOST_CTRL,
975 (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
976 for (i = 0; i < tp->irq_max; i++)
977 tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
980 static void tg3_enable_ints(struct tg3 *tp)
987 tw32(TG3PCI_MISC_HOST_CTRL,
988 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
990 tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
991 for (i = 0; i < tp->irq_cnt; i++) {
992 struct tg3_napi *tnapi = &tp->napi[i];
994 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
995 if (tg3_flag(tp, 1SHOT_MSI))
996 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
998 tp->coal_now |= tnapi->coal_now;
1001 /* Force an initial interrupt */
1002 if (!tg3_flag(tp, TAGGED_STATUS) &&
1003 (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
1004 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
1006 tw32(HOSTCC_MODE, tp->coal_now);
1008 tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
1011 static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
1013 struct tg3 *tp = tnapi->tp;
1014 struct tg3_hw_status *sblk = tnapi->hw_status;
1015 unsigned int work_exists = 0;
1017 /* check for phy events */
1018 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
1019 if (sblk->status & SD_STATUS_LINK_CHG)
1023 /* check for TX work to do */
1024 if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
1027 /* check for RX work to do */
1028 if (tnapi->rx_rcb_prod_idx &&
1029 *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
1036 * similar to tg3_enable_ints, but it accurately determines whether there
1037 * is new work pending and can return without flushing the PIO write
1038 * which reenables interrupts
1040 static void tg3_int_reenable(struct tg3_napi *tnapi)
1042 struct tg3 *tp = tnapi->tp;
1044 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
1047 /* When doing tagged status, this work check is unnecessary.
1048 * The last_tag we write above tells the chip which piece of
1049 * work we've completed.
1051 if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
1052 tw32(HOSTCC_MODE, tp->coalesce_mode |
1053 HOSTCC_MODE_ENABLE | tnapi->coal_now);
1056 static void tg3_switch_clocks(struct tg3 *tp)
1059 u32 orig_clock_ctrl;
1061 if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
1064 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
1066 orig_clock_ctrl = clock_ctrl;
1067 clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
1068 CLOCK_CTRL_CLKRUN_OENABLE |
1070 tp->pci_clock_ctrl = clock_ctrl;
1072 if (tg3_flag(tp, 5705_PLUS)) {
1073 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
1074 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1075 clock_ctrl | CLOCK_CTRL_625_CORE, 40);
1077 } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
1078 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1080 (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
1082 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1083 clock_ctrl | (CLOCK_CTRL_ALTCLK),
1086 tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
1089 #define PHY_BUSY_LOOPS 5000
1091 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
1097 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1099 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
1103 tg3_ape_lock(tp, tp->phy_ape_lock);
1107 frame_val = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
1108 MI_COM_PHY_ADDR_MASK);
1109 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
1110 MI_COM_REG_ADDR_MASK);
1111 frame_val |= (MI_COM_CMD_READ | MI_COM_START);
1113 tw32_f(MAC_MI_COM, frame_val);
1115 loops = PHY_BUSY_LOOPS;
1116 while (loops != 0) {
1118 frame_val = tr32(MAC_MI_COM);
1120 if ((frame_val & MI_COM_BUSY) == 0) {
1122 frame_val = tr32(MAC_MI_COM);
1130 *val = frame_val & MI_COM_DATA_MASK;
1134 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1135 tw32_f(MAC_MI_MODE, tp->mi_mode);
1139 tg3_ape_unlock(tp, tp->phy_ape_lock);
1144 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
1150 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
1151 (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
1154 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1156 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
1160 tg3_ape_lock(tp, tp->phy_ape_lock);
1162 frame_val = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
1163 MI_COM_PHY_ADDR_MASK);
1164 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
1165 MI_COM_REG_ADDR_MASK);
1166 frame_val |= (val & MI_COM_DATA_MASK);
1167 frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
1169 tw32_f(MAC_MI_COM, frame_val);
1171 loops = PHY_BUSY_LOOPS;
1172 while (loops != 0) {
1174 frame_val = tr32(MAC_MI_COM);
1175 if ((frame_val & MI_COM_BUSY) == 0) {
1177 frame_val = tr32(MAC_MI_COM);
1187 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1188 tw32_f(MAC_MI_MODE, tp->mi_mode);
1192 tg3_ape_unlock(tp, tp->phy_ape_lock);
1197 static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
1201 err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1205 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1209 err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1210 MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1214 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
1220 static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
1224 err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1228 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1232 err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1233 MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1237 err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
1243 static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
1247 err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1249 err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
1254 static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1258 err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1260 err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1265 static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
1269 err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
1270 (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
1271 MII_TG3_AUXCTL_SHDWSEL_MISC);
1273 err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
1278 static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
1280 if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
1281 set |= MII_TG3_AUXCTL_MISC_WREN;
1283 return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
1286 #define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
1287 tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
1288 MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
1289 MII_TG3_AUXCTL_ACTL_TX_6DB)
1291 #define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
1292 tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
1293 MII_TG3_AUXCTL_ACTL_TX_6DB);
1295 static int tg3_bmcr_reset(struct tg3 *tp)
1300 /* OK, reset it, and poll the BMCR_RESET bit until it
1301 * clears or we time out.
1303 phy_control = BMCR_RESET;
1304 err = tg3_writephy(tp, MII_BMCR, phy_control);
1310 err = tg3_readphy(tp, MII_BMCR, &phy_control);
1314 if ((phy_control & BMCR_RESET) == 0) {
1326 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
1328 struct tg3 *tp = bp->priv;
1331 spin_lock_bh(&tp->lock);
1333 if (tg3_readphy(tp, reg, &val))
1336 spin_unlock_bh(&tp->lock);
1341 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1343 struct tg3 *tp = bp->priv;
1346 spin_lock_bh(&tp->lock);
1348 if (tg3_writephy(tp, reg, val))
1351 spin_unlock_bh(&tp->lock);
1356 static int tg3_mdio_reset(struct mii_bus *bp)
1361 static void tg3_mdio_config_5785(struct tg3 *tp)
1364 struct phy_device *phydev;
1366 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1367 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1368 case PHY_ID_BCM50610:
1369 case PHY_ID_BCM50610M:
1370 val = MAC_PHYCFG2_50610_LED_MODES;
1372 case PHY_ID_BCMAC131:
1373 val = MAC_PHYCFG2_AC131_LED_MODES;
1375 case PHY_ID_RTL8211C:
1376 val = MAC_PHYCFG2_RTL8211C_LED_MODES;
1378 case PHY_ID_RTL8201E:
1379 val = MAC_PHYCFG2_RTL8201E_LED_MODES;
1385 if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
1386 tw32(MAC_PHYCFG2, val);
1388 val = tr32(MAC_PHYCFG1);
1389 val &= ~(MAC_PHYCFG1_RGMII_INT |
1390 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
1391 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
1392 tw32(MAC_PHYCFG1, val);
1397 if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
1398 val |= MAC_PHYCFG2_EMODE_MASK_MASK |
1399 MAC_PHYCFG2_FMODE_MASK_MASK |
1400 MAC_PHYCFG2_GMODE_MASK_MASK |
1401 MAC_PHYCFG2_ACT_MASK_MASK |
1402 MAC_PHYCFG2_QUAL_MASK_MASK |
1403 MAC_PHYCFG2_INBAND_ENABLE;
1405 tw32(MAC_PHYCFG2, val);
1407 val = tr32(MAC_PHYCFG1);
1408 val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
1409 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
1410 if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1411 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1412 val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
1413 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1414 val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
1416 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
1417 MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
1418 tw32(MAC_PHYCFG1, val);
1420 val = tr32(MAC_EXT_RGMII_MODE);
1421 val &= ~(MAC_RGMII_MODE_RX_INT_B |
1422 MAC_RGMII_MODE_RX_QUALITY |
1423 MAC_RGMII_MODE_RX_ACTIVITY |
1424 MAC_RGMII_MODE_RX_ENG_DET |
1425 MAC_RGMII_MODE_TX_ENABLE |
1426 MAC_RGMII_MODE_TX_LOWPWR |
1427 MAC_RGMII_MODE_TX_RESET);
1428 if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1429 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1430 val |= MAC_RGMII_MODE_RX_INT_B |
1431 MAC_RGMII_MODE_RX_QUALITY |
1432 MAC_RGMII_MODE_RX_ACTIVITY |
1433 MAC_RGMII_MODE_RX_ENG_DET;
1434 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1435 val |= MAC_RGMII_MODE_TX_ENABLE |
1436 MAC_RGMII_MODE_TX_LOWPWR |
1437 MAC_RGMII_MODE_TX_RESET;
1439 tw32(MAC_EXT_RGMII_MODE, val);
1442 static void tg3_mdio_start(struct tg3 *tp)
1444 tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
1445 tw32_f(MAC_MI_MODE, tp->mi_mode);
1448 if (tg3_flag(tp, MDIOBUS_INITED) &&
1449 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
1450 tg3_mdio_config_5785(tp);
1453 static int tg3_mdio_init(struct tg3 *tp)
1457 struct phy_device *phydev;
1459 if (tg3_flag(tp, 5717_PLUS)) {
1462 tp->phy_addr = tp->pci_fn + 1;
1464 if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
1465 is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
1467 is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
1468 TG3_CPMU_PHY_STRAP_IS_SERDES;
1472 tp->phy_addr = TG3_PHY_MII_ADDR;
1476 if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
1479 tp->mdio_bus = mdiobus_alloc();
1480 if (tp->mdio_bus == NULL)
1483 tp->mdio_bus->name = "tg3 mdio bus";
1484 snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
1485 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
1486 tp->mdio_bus->priv = tp;
1487 tp->mdio_bus->parent = &tp->pdev->dev;
1488 tp->mdio_bus->read = &tg3_mdio_read;
1489 tp->mdio_bus->write = &tg3_mdio_write;
1490 tp->mdio_bus->reset = &tg3_mdio_reset;
1491 tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
1492 tp->mdio_bus->irq = &tp->mdio_irq[0];
1494 for (i = 0; i < PHY_MAX_ADDR; i++)
1495 tp->mdio_bus->irq[i] = PHY_POLL;
1497 /* The bus registration will look for all the PHYs on the mdio bus.
1498 * Unfortunately, it does not ensure the PHY is powered up before
1499 * accessing the PHY ID registers. A chip reset is the
1500 * quickest way to bring the device back to an operational state..
1502 if (tg3_readphy(tp, MII_BMCR, ®) || (reg & BMCR_PDOWN))
1505 i = mdiobus_register(tp->mdio_bus);
1507 dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
1508 mdiobus_free(tp->mdio_bus);
1512 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1514 if (!phydev || !phydev->drv) {
1515 dev_warn(&tp->pdev->dev, "No PHY devices\n");
1516 mdiobus_unregister(tp->mdio_bus);
1517 mdiobus_free(tp->mdio_bus);
1521 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1522 case PHY_ID_BCM57780:
1523 phydev->interface = PHY_INTERFACE_MODE_GMII;
1524 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1526 case PHY_ID_BCM50610:
1527 case PHY_ID_BCM50610M:
1528 phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
1529 PHY_BRCM_RX_REFCLK_UNUSED |
1530 PHY_BRCM_DIS_TXCRXC_NOENRGY |
1531 PHY_BRCM_AUTO_PWRDWN_ENABLE;
1532 if (tg3_flag(tp, RGMII_INBAND_DISABLE))
1533 phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
1534 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1535 phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
1536 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1537 phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
1539 case PHY_ID_RTL8211C:
1540 phydev->interface = PHY_INTERFACE_MODE_RGMII;
1542 case PHY_ID_RTL8201E:
1543 case PHY_ID_BCMAC131:
1544 phydev->interface = PHY_INTERFACE_MODE_MII;
1545 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1546 tp->phy_flags |= TG3_PHYFLG_IS_FET;
1550 tg3_flag_set(tp, MDIOBUS_INITED);
1552 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
1553 tg3_mdio_config_5785(tp);
1558 static void tg3_mdio_fini(struct tg3 *tp)
1560 if (tg3_flag(tp, MDIOBUS_INITED)) {
1561 tg3_flag_clear(tp, MDIOBUS_INITED);
1562 mdiobus_unregister(tp->mdio_bus);
1563 mdiobus_free(tp->mdio_bus);
1567 /* tp->lock is held. */
1568 static inline void tg3_generate_fw_event(struct tg3 *tp)
1572 val = tr32(GRC_RX_CPU_EVENT);
1573 val |= GRC_RX_CPU_DRIVER_EVENT;
1574 tw32_f(GRC_RX_CPU_EVENT, val);
1576 tp->last_event_jiffies = jiffies;
1579 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1581 /* tp->lock is held. */
1582 static void tg3_wait_for_event_ack(struct tg3 *tp)
1585 unsigned int delay_cnt;
1588 /* If enough time has passed, no wait is necessary. */
1589 time_remain = (long)(tp->last_event_jiffies + 1 +
1590 usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1592 if (time_remain < 0)
1595 /* Check if we can shorten the wait time. */
1596 delay_cnt = jiffies_to_usecs(time_remain);
1597 if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1598 delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
1599 delay_cnt = (delay_cnt >> 3) + 1;
1601 for (i = 0; i < delay_cnt; i++) {
1602 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1608 /* tp->lock is held. */
1609 static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
1614 if (!tg3_readphy(tp, MII_BMCR, ®))
1616 if (!tg3_readphy(tp, MII_BMSR, ®))
1617 val |= (reg & 0xffff);
1621 if (!tg3_readphy(tp, MII_ADVERTISE, ®))
1623 if (!tg3_readphy(tp, MII_LPA, ®))
1624 val |= (reg & 0xffff);
1628 if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1629 if (!tg3_readphy(tp, MII_CTRL1000, ®))
1631 if (!tg3_readphy(tp, MII_STAT1000, ®))
1632 val |= (reg & 0xffff);
1636 if (!tg3_readphy(tp, MII_PHYADDR, ®))
1643 /* tp->lock is held. */
1644 static void tg3_ump_link_report(struct tg3 *tp)
1648 if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1651 tg3_phy_gather_ump_data(tp, data);
1653 tg3_wait_for_event_ack(tp);
1655 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1656 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1657 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1658 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1659 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1660 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
1662 tg3_generate_fw_event(tp);
1665 /* tp->lock is held. */
1666 static void tg3_stop_fw(struct tg3 *tp)
1668 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1669 /* Wait for RX cpu to ACK the previous event. */
1670 tg3_wait_for_event_ack(tp);
1672 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1674 tg3_generate_fw_event(tp);
1676 /* Wait for RX cpu to ACK this event. */
1677 tg3_wait_for_event_ack(tp);
1681 /* tp->lock is held. */
1682 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1684 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1685 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1687 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1689 case RESET_KIND_INIT:
1690 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1694 case RESET_KIND_SHUTDOWN:
1695 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1699 case RESET_KIND_SUSPEND:
1700 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1709 if (kind == RESET_KIND_INIT ||
1710 kind == RESET_KIND_SUSPEND)
1711 tg3_ape_driver_state_change(tp, kind);
1714 /* tp->lock is held. */
1715 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1717 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1719 case RESET_KIND_INIT:
1720 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1721 DRV_STATE_START_DONE);
1724 case RESET_KIND_SHUTDOWN:
1725 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1726 DRV_STATE_UNLOAD_DONE);
1734 if (kind == RESET_KIND_SHUTDOWN)
1735 tg3_ape_driver_state_change(tp, kind);
1738 /* tp->lock is held. */
1739 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1741 if (tg3_flag(tp, ENABLE_ASF)) {
1743 case RESET_KIND_INIT:
1744 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1748 case RESET_KIND_SHUTDOWN:
1749 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1753 case RESET_KIND_SUSPEND:
1754 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1764 static int tg3_poll_fw(struct tg3 *tp)
1769 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1770 /* Wait up to 20ms for init done. */
1771 for (i = 0; i < 200; i++) {
1772 if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1779 /* Wait for firmware initialization to complete. */
1780 for (i = 0; i < 100000; i++) {
1781 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1782 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1787 /* Chip might not be fitted with firmware. Some Sun onboard
1788 * parts are configured like that. So don't signal the timeout
1789 * of the above loop as an error, but do report the lack of
1790 * running firmware once.
1792 if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1793 tg3_flag_set(tp, NO_FWARE_REPORTED);
1795 netdev_info(tp->dev, "No firmware running\n");
1798 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
1799 /* The 57765 A0 needs a little more
1800 * time to do some important work.
1808 static void tg3_link_report(struct tg3 *tp)
1810 if (!netif_carrier_ok(tp->dev)) {
1811 netif_info(tp, link, tp->dev, "Link is down\n");
1812 tg3_ump_link_report(tp);
1813 } else if (netif_msg_link(tp)) {
1814 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1815 (tp->link_config.active_speed == SPEED_1000 ?
1817 (tp->link_config.active_speed == SPEED_100 ?
1819 (tp->link_config.active_duplex == DUPLEX_FULL ?
1822 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1823 (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1825 (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1828 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1829 netdev_info(tp->dev, "EEE is %s\n",
1830 tp->setlpicnt ? "enabled" : "disabled");
1832 tg3_ump_link_report(tp);
1836 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1840 if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1841 miireg = ADVERTISE_1000XPAUSE;
1842 else if (flow_ctrl & FLOW_CTRL_TX)
1843 miireg = ADVERTISE_1000XPSE_ASYM;
1844 else if (flow_ctrl & FLOW_CTRL_RX)
1845 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1852 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1856 if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1857 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1858 } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1859 if (lcladv & ADVERTISE_1000XPAUSE)
1861 if (rmtadv & ADVERTISE_1000XPAUSE)
1868 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1872 u32 old_rx_mode = tp->rx_mode;
1873 u32 old_tx_mode = tp->tx_mode;
1875 if (tg3_flag(tp, USE_PHYLIB))
1876 autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
1878 autoneg = tp->link_config.autoneg;
1880 if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1881 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1882 flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1884 flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1886 flowctrl = tp->link_config.flowctrl;
1888 tp->link_config.active_flowctrl = flowctrl;
1890 if (flowctrl & FLOW_CTRL_RX)
1891 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1893 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1895 if (old_rx_mode != tp->rx_mode)
1896 tw32_f(MAC_RX_MODE, tp->rx_mode);
1898 if (flowctrl & FLOW_CTRL_TX)
1899 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1901 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1903 if (old_tx_mode != tp->tx_mode)
1904 tw32_f(MAC_TX_MODE, tp->tx_mode);
1907 static void tg3_adjust_link(struct net_device *dev)
1909 u8 oldflowctrl, linkmesg = 0;
1910 u32 mac_mode, lcl_adv, rmt_adv;
1911 struct tg3 *tp = netdev_priv(dev);
1912 struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1914 spin_lock_bh(&tp->lock);
1916 mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1917 MAC_MODE_HALF_DUPLEX);
1919 oldflowctrl = tp->link_config.active_flowctrl;
1925 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
1926 mac_mode |= MAC_MODE_PORT_MODE_MII;
1927 else if (phydev->speed == SPEED_1000 ||
1928 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
1929 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1931 mac_mode |= MAC_MODE_PORT_MODE_MII;
1933 if (phydev->duplex == DUPLEX_HALF)
1934 mac_mode |= MAC_MODE_HALF_DUPLEX;
1936 lcl_adv = mii_advertise_flowctrl(
1937 tp->link_config.flowctrl);
1940 rmt_adv = LPA_PAUSE_CAP;
1941 if (phydev->asym_pause)
1942 rmt_adv |= LPA_PAUSE_ASYM;
1945 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
1947 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1949 if (mac_mode != tp->mac_mode) {
1950 tp->mac_mode = mac_mode;
1951 tw32_f(MAC_MODE, tp->mac_mode);
1955 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
1956 if (phydev->speed == SPEED_10)
1958 MAC_MI_STAT_10MBPS_MODE |
1959 MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1961 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1964 if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
1965 tw32(MAC_TX_LENGTHS,
1966 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1967 (6 << TX_LENGTHS_IPG_SHIFT) |
1968 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
1970 tw32(MAC_TX_LENGTHS,
1971 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1972 (6 << TX_LENGTHS_IPG_SHIFT) |
1973 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
1975 if (phydev->link != tp->old_link ||
1976 phydev->speed != tp->link_config.active_speed ||
1977 phydev->duplex != tp->link_config.active_duplex ||
1978 oldflowctrl != tp->link_config.active_flowctrl)
1981 tp->old_link = phydev->link;
1982 tp->link_config.active_speed = phydev->speed;
1983 tp->link_config.active_duplex = phydev->duplex;
1985 spin_unlock_bh(&tp->lock);
1988 tg3_link_report(tp);
1991 static int tg3_phy_init(struct tg3 *tp)
1993 struct phy_device *phydev;
1995 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
1998 /* Bring the PHY back to a known state. */
2001 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2003 /* Attach the MAC to the PHY. */
2004 phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
2005 phydev->dev_flags, phydev->interface);
2006 if (IS_ERR(phydev)) {
2007 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
2008 return PTR_ERR(phydev);
2011 /* Mask with MAC supported features. */
2012 switch (phydev->interface) {
2013 case PHY_INTERFACE_MODE_GMII:
2014 case PHY_INTERFACE_MODE_RGMII:
2015 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
2016 phydev->supported &= (PHY_GBIT_FEATURES |
2018 SUPPORTED_Asym_Pause);
2022 case PHY_INTERFACE_MODE_MII:
2023 phydev->supported &= (PHY_BASIC_FEATURES |
2025 SUPPORTED_Asym_Pause);
2028 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2032 tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
2034 phydev->advertising = phydev->supported;
2039 static void tg3_phy_start(struct tg3 *tp)
2041 struct phy_device *phydev;
2043 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2046 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2048 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2049 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
2050 phydev->speed = tp->link_config.speed;
2051 phydev->duplex = tp->link_config.duplex;
2052 phydev->autoneg = tp->link_config.autoneg;
2053 phydev->advertising = tp->link_config.advertising;
2058 phy_start_aneg(phydev);
2061 static void tg3_phy_stop(struct tg3 *tp)
2063 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2066 phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2069 static void tg3_phy_fini(struct tg3 *tp)
2071 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2072 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2073 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2077 static int tg3_phy_set_extloopbk(struct tg3 *tp)
2082 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2085 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2086 /* Cannot do read-modify-write on 5401 */
2087 err = tg3_phy_auxctl_write(tp,
2088 MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2089 MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2094 err = tg3_phy_auxctl_read(tp,
2095 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2099 val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2100 err = tg3_phy_auxctl_write(tp,
2101 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2107 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2111 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2114 tg3_writephy(tp, MII_TG3_FET_TEST,
2115 phytest | MII_TG3_FET_SHADOW_EN);
2116 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2118 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2120 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2121 tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2123 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2127 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
2131 if (!tg3_flag(tp, 5705_PLUS) ||
2132 (tg3_flag(tp, 5717_PLUS) &&
2133 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2136 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2137 tg3_phy_fet_toggle_apd(tp, enable);
2141 reg = MII_TG3_MISC_SHDW_WREN |
2142 MII_TG3_MISC_SHDW_SCR5_SEL |
2143 MII_TG3_MISC_SHDW_SCR5_LPED |
2144 MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2145 MII_TG3_MISC_SHDW_SCR5_SDTL |
2146 MII_TG3_MISC_SHDW_SCR5_C125OE;
2147 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
2148 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2150 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2153 reg = MII_TG3_MISC_SHDW_WREN |
2154 MII_TG3_MISC_SHDW_APD_SEL |
2155 MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2157 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2159 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2162 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
2166 if (!tg3_flag(tp, 5705_PLUS) ||
2167 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2170 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2173 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2174 u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2176 tg3_writephy(tp, MII_TG3_FET_TEST,
2177 ephy | MII_TG3_FET_SHADOW_EN);
2178 if (!tg3_readphy(tp, reg, &phy)) {
2180 phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2182 phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2183 tg3_writephy(tp, reg, phy);
2185 tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2190 ret = tg3_phy_auxctl_read(tp,
2191 MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2194 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2196 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2197 tg3_phy_auxctl_write(tp,
2198 MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2203 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2208 if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2211 ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2213 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2214 val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2217 static void tg3_phy_apply_otp(struct tg3 *tp)
2226 if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
2229 phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2230 phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2231 tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2233 phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2234 ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2235 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2237 phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2238 phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2239 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2241 phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2242 tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2244 phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2245 tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2247 phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2248 ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2249 tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2251 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2254 static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
2258 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2263 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2264 current_link_up == 1 &&
2265 tp->link_config.active_duplex == DUPLEX_FULL &&
2266 (tp->link_config.active_speed == SPEED_100 ||
2267 tp->link_config.active_speed == SPEED_1000)) {
2270 if (tp->link_config.active_speed == SPEED_1000)
2271 eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2273 eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2275 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2277 tg3_phy_cl45_read(tp, MDIO_MMD_AN,
2278 TG3_CL45_D7_EEERES_STAT, &val);
2280 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2281 val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
2285 if (!tp->setlpicnt) {
2286 if (current_link_up == 1 &&
2287 !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2288 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2289 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2292 val = tr32(TG3_CPMU_EEE_MODE);
2293 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2297 static void tg3_phy_eee_enable(struct tg3 *tp)
2301 if (tp->link_config.active_speed == SPEED_1000 &&
2302 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2303 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2304 tg3_flag(tp, 57765_CLASS)) &&
2305 !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2306 val = MII_TG3_DSP_TAP26_ALNOKO |
2307 MII_TG3_DSP_TAP26_RMRXSTO;
2308 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2309 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2312 val = tr32(TG3_CPMU_EEE_MODE);
2313 tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2316 static int tg3_wait_macro_done(struct tg3 *tp)
2323 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2324 if ((tmp32 & 0x1000) == 0)
2334 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2336 static const u32 test_pat[4][6] = {
2337 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2338 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2339 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2340 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2344 for (chan = 0; chan < 4; chan++) {
2347 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2348 (chan * 0x2000) | 0x0200);
2349 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2351 for (i = 0; i < 6; i++)
2352 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2355 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2356 if (tg3_wait_macro_done(tp)) {
2361 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2362 (chan * 0x2000) | 0x0200);
2363 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2364 if (tg3_wait_macro_done(tp)) {
2369 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2370 if (tg3_wait_macro_done(tp)) {
2375 for (i = 0; i < 6; i += 2) {
2378 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2379 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2380 tg3_wait_macro_done(tp)) {
2386 if (low != test_pat[chan][i] ||
2387 high != test_pat[chan][i+1]) {
2388 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2389 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2390 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2400 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2404 for (chan = 0; chan < 4; chan++) {
2407 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2408 (chan * 0x2000) | 0x0200);
2409 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2410 for (i = 0; i < 6; i++)
2411 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2412 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2413 if (tg3_wait_macro_done(tp))
2420 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2422 u32 reg32, phy9_orig;
2423 int retries, do_phy_reset, err;
2429 err = tg3_bmcr_reset(tp);
2435 /* Disable transmitter and interrupt. */
2436 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, ®32))
2440 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2442 /* Set full-duplex, 1000 mbps. */
2443 tg3_writephy(tp, MII_BMCR,
2444 BMCR_FULLDPLX | BMCR_SPEED1000);
2446 /* Set to master mode. */
2447 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2450 tg3_writephy(tp, MII_CTRL1000,
2451 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2453 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
2457 /* Block the PHY control access. */
2458 tg3_phydsp_write(tp, 0x8005, 0x0800);
2460 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2463 } while (--retries);
2465 err = tg3_phy_reset_chanpat(tp);
2469 tg3_phydsp_write(tp, 0x8005, 0x0000);
2471 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2472 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2474 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2476 tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2478 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, ®32)) {
2480 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2487 static void tg3_carrier_on(struct tg3 *tp)
2489 netif_carrier_on(tp->dev);
2493 static void tg3_carrier_off(struct tg3 *tp)
2495 netif_carrier_off(tp->dev);
2496 tp->link_up = false;
2499 /* This will reset the tigon3 PHY if there is no valid
2500 * link unless the FORCE argument is non-zero.
2502 static int tg3_phy_reset(struct tg3 *tp)
2507 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2508 val = tr32(GRC_MISC_CFG);
2509 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2512 err = tg3_readphy(tp, MII_BMSR, &val);
2513 err |= tg3_readphy(tp, MII_BMSR, &val);
2517 if (netif_running(tp->dev) && tp->link_up) {
2518 tg3_carrier_off(tp);
2519 tg3_link_report(tp);
2522 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2523 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2524 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
2525 err = tg3_phy_reset_5703_4_5(tp);
2532 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
2533 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
2534 cpmuctrl = tr32(TG3_CPMU_CTRL);
2535 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2537 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2540 err = tg3_bmcr_reset(tp);
2544 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2545 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2546 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2548 tw32(TG3_CPMU_CTRL, cpmuctrl);
2551 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2552 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2553 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2554 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2555 CPMU_LSPD_1000MB_MACCLK_12_5) {
2556 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2558 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2562 if (tg3_flag(tp, 5717_PLUS) &&
2563 (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2566 tg3_phy_apply_otp(tp);
2568 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2569 tg3_phy_toggle_apd(tp, true);
2571 tg3_phy_toggle_apd(tp, false);
2574 if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2575 !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2576 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2577 tg3_phydsp_write(tp, 0x000a, 0x0323);
2578 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2581 if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2582 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2583 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2586 if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2587 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2588 tg3_phydsp_write(tp, 0x000a, 0x310b);
2589 tg3_phydsp_write(tp, 0x201f, 0x9506);
2590 tg3_phydsp_write(tp, 0x401f, 0x14e2);
2591 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2593 } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2594 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2595 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2596 if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2597 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2598 tg3_writephy(tp, MII_TG3_TEST1,
2599 MII_TG3_TEST1_TRIM_EN | 0x4);
2601 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2603 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2607 /* Set Extended packet length bit (bit 14) on all chips that */
2608 /* support jumbo frames */
2609 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2610 /* Cannot do read-modify-write on 5401 */
2611 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2612 } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2613 /* Set bit 14 with read-modify-write to preserve other bits */
2614 err = tg3_phy_auxctl_read(tp,
2615 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2617 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2618 val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2621 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2622 * jumbo frames transmission.
2624 if (tg3_flag(tp, JUMBO_CAPABLE)) {
2625 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2626 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2627 val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2630 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2631 /* adjust output voltage */
2632 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2635 if (tp->pci_chip_rev_id == CHIPREV_ID_5762_A0)
2636 tg3_phydsp_write(tp, 0xffb, 0x4000);
2638 tg3_phy_toggle_automdix(tp, 1);
2639 tg3_phy_set_wirespeed(tp);
2643 #define TG3_GPIO_MSG_DRVR_PRES 0x00000001
2644 #define TG3_GPIO_MSG_NEED_VAUX 0x00000002
2645 #define TG3_GPIO_MSG_MASK (TG3_GPIO_MSG_DRVR_PRES | \
2646 TG3_GPIO_MSG_NEED_VAUX)
2647 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2648 ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2649 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2650 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2651 (TG3_GPIO_MSG_DRVR_PRES << 12))
2653 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2654 ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2655 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2656 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2657 (TG3_GPIO_MSG_NEED_VAUX << 12))
2659 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2663 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2664 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2665 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2667 status = tr32(TG3_CPMU_DRV_STATUS);
2669 shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2670 status &= ~(TG3_GPIO_MSG_MASK << shift);
2671 status |= (newstat << shift);
2673 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2674 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2675 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2677 tw32(TG3_CPMU_DRV_STATUS, status);
2679 return status >> TG3_APE_GPIO_MSG_SHIFT;
2682 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2684 if (!tg3_flag(tp, IS_NIC))
2687 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2688 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2689 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2690 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2693 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2695 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2696 TG3_GRC_LCLCTL_PWRSW_DELAY);
2698 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2700 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2701 TG3_GRC_LCLCTL_PWRSW_DELAY);
2707 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2711 if (!tg3_flag(tp, IS_NIC) ||
2712 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2713 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
2716 grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2718 tw32_wait_f(GRC_LOCAL_CTRL,
2719 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2720 TG3_GRC_LCLCTL_PWRSW_DELAY);
2722 tw32_wait_f(GRC_LOCAL_CTRL,
2724 TG3_GRC_LCLCTL_PWRSW_DELAY);
2726 tw32_wait_f(GRC_LOCAL_CTRL,
2727 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2728 TG3_GRC_LCLCTL_PWRSW_DELAY);
2731 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2733 if (!tg3_flag(tp, IS_NIC))
2736 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2737 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2738 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2739 (GRC_LCLCTRL_GPIO_OE0 |
2740 GRC_LCLCTRL_GPIO_OE1 |
2741 GRC_LCLCTRL_GPIO_OE2 |
2742 GRC_LCLCTRL_GPIO_OUTPUT0 |
2743 GRC_LCLCTRL_GPIO_OUTPUT1),
2744 TG3_GRC_LCLCTL_PWRSW_DELAY);
2745 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2746 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2747 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2748 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2749 GRC_LCLCTRL_GPIO_OE1 |
2750 GRC_LCLCTRL_GPIO_OE2 |
2751 GRC_LCLCTRL_GPIO_OUTPUT0 |
2752 GRC_LCLCTRL_GPIO_OUTPUT1 |
2754 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2755 TG3_GRC_LCLCTL_PWRSW_DELAY);
2757 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2758 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2759 TG3_GRC_LCLCTL_PWRSW_DELAY);
2761 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2762 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2763 TG3_GRC_LCLCTL_PWRSW_DELAY);
2766 u32 grc_local_ctrl = 0;
2768 /* Workaround to prevent overdrawing Amps. */
2769 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2770 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2771 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2773 TG3_GRC_LCLCTL_PWRSW_DELAY);
2776 /* On 5753 and variants, GPIO2 cannot be used. */
2777 no_gpio2 = tp->nic_sram_data_cfg &
2778 NIC_SRAM_DATA_CFG_NO_GPIO2;
2780 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2781 GRC_LCLCTRL_GPIO_OE1 |
2782 GRC_LCLCTRL_GPIO_OE2 |
2783 GRC_LCLCTRL_GPIO_OUTPUT1 |
2784 GRC_LCLCTRL_GPIO_OUTPUT2;
2786 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2787 GRC_LCLCTRL_GPIO_OUTPUT2);
2789 tw32_wait_f(GRC_LOCAL_CTRL,
2790 tp->grc_local_ctrl | grc_local_ctrl,
2791 TG3_GRC_LCLCTL_PWRSW_DELAY);
2793 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2795 tw32_wait_f(GRC_LOCAL_CTRL,
2796 tp->grc_local_ctrl | grc_local_ctrl,
2797 TG3_GRC_LCLCTL_PWRSW_DELAY);
2800 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2801 tw32_wait_f(GRC_LOCAL_CTRL,
2802 tp->grc_local_ctrl | grc_local_ctrl,
2803 TG3_GRC_LCLCTL_PWRSW_DELAY);
2808 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2812 /* Serialize power state transitions */
2813 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2816 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2817 msg = TG3_GPIO_MSG_NEED_VAUX;
2819 msg = tg3_set_function_status(tp, msg);
2821 if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2824 if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2825 tg3_pwrsrc_switch_to_vaux(tp);
2827 tg3_pwrsrc_die_with_vmain(tp);
2830 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2833 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2835 bool need_vaux = false;
2837 /* The GPIOs do something completely different on 57765. */
2838 if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2841 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2842 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2843 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2844 tg3_frob_aux_power_5717(tp, include_wol ?
2845 tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2849 if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2850 struct net_device *dev_peer;
2852 dev_peer = pci_get_drvdata(tp->pdev_peer);
2854 /* remove_one() may have been run on the peer. */
2856 struct tg3 *tp_peer = netdev_priv(dev_peer);
2858 if (tg3_flag(tp_peer, INIT_COMPLETE))
2861 if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2862 tg3_flag(tp_peer, ENABLE_ASF))
2867 if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2868 tg3_flag(tp, ENABLE_ASF))
2872 tg3_pwrsrc_switch_to_vaux(tp);
2874 tg3_pwrsrc_die_with_vmain(tp);
2877 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2879 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2881 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2882 if (speed != SPEED_10)
2884 } else if (speed == SPEED_10)
2890 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2894 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2895 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2896 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2897 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2900 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2901 tw32(SG_DIG_CTRL, sg_dig_ctrl);
2902 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2907 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2909 val = tr32(GRC_MISC_CFG);
2910 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2913 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2915 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2918 tg3_writephy(tp, MII_ADVERTISE, 0);
2919 tg3_writephy(tp, MII_BMCR,
2920 BMCR_ANENABLE | BMCR_ANRESTART);
2922 tg3_writephy(tp, MII_TG3_FET_TEST,
2923 phytest | MII_TG3_FET_SHADOW_EN);
2924 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2925 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2927 MII_TG3_FET_SHDW_AUXMODE4,
2930 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2933 } else if (do_low_power) {
2934 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2935 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2937 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2938 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2939 MII_TG3_AUXCTL_PCTL_VREG_11V;
2940 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
2943 /* The PHY should not be powered down on some chips because
2946 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2947 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2948 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2949 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)) ||
2950 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
2954 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2955 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2956 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2957 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2958 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2959 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2962 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2965 /* tp->lock is held. */
2966 static int tg3_nvram_lock(struct tg3 *tp)
2968 if (tg3_flag(tp, NVRAM)) {
2971 if (tp->nvram_lock_cnt == 0) {
2972 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2973 for (i = 0; i < 8000; i++) {
2974 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2979 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2983 tp->nvram_lock_cnt++;
2988 /* tp->lock is held. */
2989 static void tg3_nvram_unlock(struct tg3 *tp)
2991 if (tg3_flag(tp, NVRAM)) {
2992 if (tp->nvram_lock_cnt > 0)
2993 tp->nvram_lock_cnt--;
2994 if (tp->nvram_lock_cnt == 0)
2995 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2999 /* tp->lock is held. */
3000 static void tg3_enable_nvram_access(struct tg3 *tp)
3002 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3003 u32 nvaccess = tr32(NVRAM_ACCESS);
3005 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3009 /* tp->lock is held. */
3010 static void tg3_disable_nvram_access(struct tg3 *tp)
3012 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3013 u32 nvaccess = tr32(NVRAM_ACCESS);
3015 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3019 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3020 u32 offset, u32 *val)
3025 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
3028 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
3029 EEPROM_ADDR_DEVID_MASK |
3031 tw32(GRC_EEPROM_ADDR,
3033 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3034 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
3035 EEPROM_ADDR_ADDR_MASK) |
3036 EEPROM_ADDR_READ | EEPROM_ADDR_START);
3038 for (i = 0; i < 1000; i++) {
3039 tmp = tr32(GRC_EEPROM_ADDR);
3041 if (tmp & EEPROM_ADDR_COMPLETE)
3045 if (!(tmp & EEPROM_ADDR_COMPLETE))
3048 tmp = tr32(GRC_EEPROM_DATA);
3051 * The data will always be opposite the native endian
3052 * format. Perform a blind byteswap to compensate.
3059 #define NVRAM_CMD_TIMEOUT 10000
3061 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3065 tw32(NVRAM_CMD, nvram_cmd);
3066 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3068 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3074 if (i == NVRAM_CMD_TIMEOUT)
3080 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3082 if (tg3_flag(tp, NVRAM) &&
3083 tg3_flag(tp, NVRAM_BUFFERED) &&
3084 tg3_flag(tp, FLASH) &&
3085 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3086 (tp->nvram_jedecnum == JEDEC_ATMEL))
3088 addr = ((addr / tp->nvram_pagesize) <<
3089 ATMEL_AT45DB0X1B_PAGE_POS) +
3090 (addr % tp->nvram_pagesize);
3095 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3097 if (tg3_flag(tp, NVRAM) &&
3098 tg3_flag(tp, NVRAM_BUFFERED) &&
3099 tg3_flag(tp, FLASH) &&
3100 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3101 (tp->nvram_jedecnum == JEDEC_ATMEL))
3103 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3104 tp->nvram_pagesize) +
3105 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3110 /* NOTE: Data read in from NVRAM is byteswapped according to
3111 * the byteswapping settings for all other register accesses.
3112 * tg3 devices are BE devices, so on a BE machine, the data
3113 * returned will be exactly as it is seen in NVRAM. On a LE
3114 * machine, the 32-bit value will be byteswapped.
3116 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3120 if (!tg3_flag(tp, NVRAM))
3121 return tg3_nvram_read_using_eeprom(tp, offset, val);
3123 offset = tg3_nvram_phys_addr(tp, offset);
3125 if (offset > NVRAM_ADDR_MSK)
3128 ret = tg3_nvram_lock(tp);
3132 tg3_enable_nvram_access(tp);
3134 tw32(NVRAM_ADDR, offset);
3135 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3136 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3139 *val = tr32(NVRAM_RDDATA);
3141 tg3_disable_nvram_access(tp);
3143 tg3_nvram_unlock(tp);
3148 /* Ensures NVRAM data is in bytestream format. */
3149 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3152 int res = tg3_nvram_read(tp, offset, &v);
3154 *val = cpu_to_be32(v);
3158 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3159 u32 offset, u32 len, u8 *buf)
3164 for (i = 0; i < len; i += 4) {
3170 memcpy(&data, buf + i, 4);
3173 * The SEEPROM interface expects the data to always be opposite
3174 * the native endian format. We accomplish this by reversing
3175 * all the operations that would have been performed on the
3176 * data from a call to tg3_nvram_read_be32().
3178 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3180 val = tr32(GRC_EEPROM_ADDR);
3181 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3183 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3185 tw32(GRC_EEPROM_ADDR, val |
3186 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3187 (addr & EEPROM_ADDR_ADDR_MASK) |
3191 for (j = 0; j < 1000; j++) {
3192 val = tr32(GRC_EEPROM_ADDR);
3194 if (val & EEPROM_ADDR_COMPLETE)
3198 if (!(val & EEPROM_ADDR_COMPLETE)) {
3207 /* offset and length are dword aligned */
3208 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3212 u32 pagesize = tp->nvram_pagesize;
3213 u32 pagemask = pagesize - 1;
3217 tmp = kmalloc(pagesize, GFP_KERNEL);
3223 u32 phy_addr, page_off, size;
3225 phy_addr = offset & ~pagemask;
3227 for (j = 0; j < pagesize; j += 4) {
3228 ret = tg3_nvram_read_be32(tp, phy_addr + j,
3229 (__be32 *) (tmp + j));
3236 page_off = offset & pagemask;
3243 memcpy(tmp + page_off, buf, size);
3245 offset = offset + (pagesize - page_off);
3247 tg3_enable_nvram_access(tp);
3250 * Before we can erase the flash page, we need
3251 * to issue a special "write enable" command.
3253 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3255 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3258 /* Erase the target page */
3259 tw32(NVRAM_ADDR, phy_addr);
3261 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3262 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3264 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3267 /* Issue another write enable to start the write. */
3268 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3270 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3273 for (j = 0; j < pagesize; j += 4) {
3276 data = *((__be32 *) (tmp + j));
3278 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3280 tw32(NVRAM_ADDR, phy_addr + j);
3282 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3286 nvram_cmd |= NVRAM_CMD_FIRST;
3287 else if (j == (pagesize - 4))
3288 nvram_cmd |= NVRAM_CMD_LAST;
3290 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3298 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3299 tg3_nvram_exec_cmd(tp, nvram_cmd);
3306 /* offset and length are dword aligned */
3307 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3312 for (i = 0; i < len; i += 4, offset += 4) {
3313 u32 page_off, phy_addr, nvram_cmd;
3316 memcpy(&data, buf + i, 4);
3317 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3319 page_off = offset % tp->nvram_pagesize;
3321 phy_addr = tg3_nvram_phys_addr(tp, offset);
3323 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3325 if (page_off == 0 || i == 0)
3326 nvram_cmd |= NVRAM_CMD_FIRST;
3327 if (page_off == (tp->nvram_pagesize - 4))
3328 nvram_cmd |= NVRAM_CMD_LAST;
3331 nvram_cmd |= NVRAM_CMD_LAST;
3333 if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3334 !tg3_flag(tp, FLASH) ||
3335 !tg3_flag(tp, 57765_PLUS))
3336 tw32(NVRAM_ADDR, phy_addr);
3338 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
3339 !tg3_flag(tp, 5755_PLUS) &&
3340 (tp->nvram_jedecnum == JEDEC_ST) &&
3341 (nvram_cmd & NVRAM_CMD_FIRST)) {
3344 cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3345 ret = tg3_nvram_exec_cmd(tp, cmd);
3349 if (!tg3_flag(tp, FLASH)) {
3350 /* We always do complete word writes to eeprom. */
3351 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3354 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3361 /* offset and length are dword aligned */
3362 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3366 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3367 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3368 ~GRC_LCLCTRL_GPIO_OUTPUT1);
3372 if (!tg3_flag(tp, NVRAM)) {
3373 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3377 ret = tg3_nvram_lock(tp);
3381 tg3_enable_nvram_access(tp);
3382 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3383 tw32(NVRAM_WRITE1, 0x406);
3385 grc_mode = tr32(GRC_MODE);
3386 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3388 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3389 ret = tg3_nvram_write_block_buffered(tp, offset, len,
3392 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3396 grc_mode = tr32(GRC_MODE);
3397 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3399 tg3_disable_nvram_access(tp);
3400 tg3_nvram_unlock(tp);
3403 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3404 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3411 #define RX_CPU_SCRATCH_BASE 0x30000
3412 #define RX_CPU_SCRATCH_SIZE 0x04000
3413 #define TX_CPU_SCRATCH_BASE 0x34000
3414 #define TX_CPU_SCRATCH_SIZE 0x04000
3416 /* tp->lock is held. */
3417 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
3421 BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3423 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3424 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3426 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3429 if (offset == RX_CPU_BASE) {
3430 for (i = 0; i < 10000; i++) {
3431 tw32(offset + CPU_STATE, 0xffffffff);
3432 tw32(offset + CPU_MODE, CPU_MODE_HALT);
3433 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3437 tw32(offset + CPU_STATE, 0xffffffff);
3438 tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
3441 for (i = 0; i < 10000; i++) {
3442 tw32(offset + CPU_STATE, 0xffffffff);
3443 tw32(offset + CPU_MODE, CPU_MODE_HALT);
3444 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3450 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3451 __func__, offset == RX_CPU_BASE ? "RX" : "TX");
3455 /* Clear firmware's nvram arbitration. */
3456 if (tg3_flag(tp, NVRAM))
3457 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3462 unsigned int fw_base;
3463 unsigned int fw_len;
3464 const __be32 *fw_data;
3467 /* tp->lock is held. */
3468 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3469 u32 cpu_scratch_base, int cpu_scratch_size,
3470 struct fw_info *info)
3472 int err, lock_err, i;
3473 void (*write_op)(struct tg3 *, u32, u32);
3475 if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3477 "%s: Trying to load TX cpu firmware which is 5705\n",
3482 if (tg3_flag(tp, 5705_PLUS))
3483 write_op = tg3_write_mem;
3485 write_op = tg3_write_indirect_reg32;
3487 /* It is possible that bootcode is still loading at this point.
3488 * Get the nvram lock first before halting the cpu.
3490 lock_err = tg3_nvram_lock(tp);
3491 err = tg3_halt_cpu(tp, cpu_base);
3493 tg3_nvram_unlock(tp);
3497 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3498 write_op(tp, cpu_scratch_base + i, 0);
3499 tw32(cpu_base + CPU_STATE, 0xffffffff);
3500 tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
3501 for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
3502 write_op(tp, (cpu_scratch_base +
3503 (info->fw_base & 0xffff) +
3505 be32_to_cpu(info->fw_data[i]));
3513 /* tp->lock is held. */
3514 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3516 struct fw_info info;
3517 const __be32 *fw_data;
3520 fw_data = (void *)tp->fw->data;
3522 /* Firmware blob starts with version numbers, followed by
3523 start address and length. We are setting complete length.
3524 length = end_address_of_bss - start_address_of_text.
3525 Remainder is the blob to be loaded contiguously
3526 from start address. */
3528 info.fw_base = be32_to_cpu(fw_data[1]);
3529 info.fw_len = tp->fw->size - 12;
3530 info.fw_data = &fw_data[3];
3532 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3533 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3538 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3539 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3544 /* Now startup only the RX cpu. */
3545 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3546 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3548 for (i = 0; i < 5; i++) {
3549 if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
3551 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3552 tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
3553 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3557 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
3558 "should be %08x\n", __func__,
3559 tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
3562 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3563 tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
3568 /* tp->lock is held. */
3569 static int tg3_load_tso_firmware(struct tg3 *tp)
3571 struct fw_info info;
3572 const __be32 *fw_data;
3573 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3576 if (tg3_flag(tp, HW_TSO_1) ||
3577 tg3_flag(tp, HW_TSO_2) ||
3578 tg3_flag(tp, HW_TSO_3))
3581 fw_data = (void *)tp->fw->data;
3583 /* Firmware blob starts with version numbers, followed by
3584 start address and length. We are setting complete length.
3585 length = end_address_of_bss - start_address_of_text.
3586 Remainder is the blob to be loaded contiguously
3587 from start address. */
3589 info.fw_base = be32_to_cpu(fw_data[1]);
3590 cpu_scratch_size = tp->fw_len;
3591 info.fw_len = tp->fw->size - 12;
3592 info.fw_data = &fw_data[3];
3594 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
3595 cpu_base = RX_CPU_BASE;
3596 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3598 cpu_base = TX_CPU_BASE;
3599 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3600 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3603 err = tg3_load_firmware_cpu(tp, cpu_base,
3604 cpu_scratch_base, cpu_scratch_size,
3609 /* Now startup the cpu. */
3610 tw32(cpu_base + CPU_STATE, 0xffffffff);
3611 tw32_f(cpu_base + CPU_PC, info.fw_base);
3613 for (i = 0; i < 5; i++) {
3614 if (tr32(cpu_base + CPU_PC) == info.fw_base)
3616 tw32(cpu_base + CPU_STATE, 0xffffffff);
3617 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3618 tw32_f(cpu_base + CPU_PC, info.fw_base);
3623 "%s fails to set CPU PC, is %08x should be %08x\n",
3624 __func__, tr32(cpu_base + CPU_PC), info.fw_base);
3627 tw32(cpu_base + CPU_STATE, 0xffffffff);
3628 tw32_f(cpu_base + CPU_MODE, 0x00000000);
3633 /* tp->lock is held. */
3634 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
3636 u32 addr_high, addr_low;
3639 addr_high = ((tp->dev->dev_addr[0] << 8) |
3640 tp->dev->dev_addr[1]);
3641 addr_low = ((tp->dev->dev_addr[2] << 24) |
3642 (tp->dev->dev_addr[3] << 16) |
3643 (tp->dev->dev_addr[4] << 8) |
3644 (tp->dev->dev_addr[5] << 0));
3645 for (i = 0; i < 4; i++) {
3646 if (i == 1 && skip_mac_1)
3648 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
3649 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
3652 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3653 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
3654 for (i = 0; i < 12; i++) {
3655 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
3656 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
3660 addr_high = (tp->dev->dev_addr[0] +
3661 tp->dev->dev_addr[1] +
3662 tp->dev->dev_addr[2] +
3663 tp->dev->dev_addr[3] +
3664 tp->dev->dev_addr[4] +
3665 tp->dev->dev_addr[5]) &
3666 TX_BACKOFF_SEED_MASK;
3667 tw32(MAC_TX_BACKOFF_SEED, addr_high);
3670 static void tg3_enable_register_access(struct tg3 *tp)
3673 * Make sure register accesses (indirect or otherwise) will function
3676 pci_write_config_dword(tp->pdev,
3677 TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
3680 static int tg3_power_up(struct tg3 *tp)
3684 tg3_enable_register_access(tp);
3686 err = pci_set_power_state(tp->pdev, PCI_D0);
3688 /* Switch out of Vaux if it is a NIC */
3689 tg3_pwrsrc_switch_to_vmain(tp);
3691 netdev_err(tp->dev, "Transition to D0 failed\n");
3697 static int tg3_setup_phy(struct tg3 *, int);
3699 static int tg3_power_down_prepare(struct tg3 *tp)
3702 bool device_should_wake, do_low_power;
3704 tg3_enable_register_access(tp);
3706 /* Restore the CLKREQ setting. */
3707 if (tg3_flag(tp, CLKREQ_BUG))
3708 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
3709 PCI_EXP_LNKCTL_CLKREQ_EN);
3711 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
3712 tw32(TG3PCI_MISC_HOST_CTRL,
3713 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
3715 device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
3716 tg3_flag(tp, WOL_ENABLE);
3718 if (tg3_flag(tp, USE_PHYLIB)) {
3719 do_low_power = false;
3720 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
3721 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3722 struct phy_device *phydev;
3723 u32 phyid, advertising;
3725 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
3727 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3729 tp->link_config.speed = phydev->speed;
3730 tp->link_config.duplex = phydev->duplex;
3731 tp->link_config.autoneg = phydev->autoneg;
3732 tp->link_config.advertising = phydev->advertising;
3734 advertising = ADVERTISED_TP |
3736 ADVERTISED_Autoneg |
3737 ADVERTISED_10baseT_Half;
3739 if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
3740 if (tg3_flag(tp, WOL_SPEED_100MB))
3742 ADVERTISED_100baseT_Half |
3743 ADVERTISED_100baseT_Full |
3744 ADVERTISED_10baseT_Full;
3746 advertising |= ADVERTISED_10baseT_Full;
3749 phydev->advertising = advertising;
3751 phy_start_aneg(phydev);
3753 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
3754 if (phyid != PHY_ID_BCMAC131) {
3755 phyid &= PHY_BCM_OUI_MASK;
3756 if (phyid == PHY_BCM_OUI_1 ||
3757 phyid == PHY_BCM_OUI_2 ||
3758 phyid == PHY_BCM_OUI_3)
3759 do_low_power = true;
3763 do_low_power = true;
3765 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
3766 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3768 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
3769 tg3_setup_phy(tp, 0);
3772 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3775 val = tr32(GRC_VCPU_EXT_CTRL);
3776 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
3777 } else if (!tg3_flag(tp, ENABLE_ASF)) {
3781 for (i = 0; i < 200; i++) {
3782 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
3783 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
3788 if (tg3_flag(tp, WOL_CAP))
3789 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
3790 WOL_DRV_STATE_SHUTDOWN |
3794 if (device_should_wake) {
3797 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
3799 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
3800 tg3_phy_auxctl_write(tp,
3801 MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
3802 MII_TG3_AUXCTL_PCTL_WOL_EN |
3803 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3804 MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
3808 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3809 mac_mode = MAC_MODE_PORT_MODE_GMII;
3811 mac_mode = MAC_MODE_PORT_MODE_MII;
3813 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
3814 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
3816 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
3817 SPEED_100 : SPEED_10;
3818 if (tg3_5700_link_polarity(tp, speed))
3819 mac_mode |= MAC_MODE_LINK_POLARITY;
3821 mac_mode &= ~MAC_MODE_LINK_POLARITY;
3824 mac_mode = MAC_MODE_PORT_MODE_TBI;
3827 if (!tg3_flag(tp, 5750_PLUS))
3828 tw32(MAC_LED_CTRL, tp->led_ctrl);
3830 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
3831 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
3832 (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
3833 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
3835 if (tg3_flag(tp, ENABLE_APE))
3836 mac_mode |= MAC_MODE_APE_TX_EN |
3837 MAC_MODE_APE_RX_EN |
3838 MAC_MODE_TDE_ENABLE;
3840 tw32_f(MAC_MODE, mac_mode);
3843 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
3847 if (!tg3_flag(tp, WOL_SPEED_100MB) &&
3848 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3849 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
3852 base_val = tp->pci_clock_ctrl;
3853 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
3854 CLOCK_CTRL_TXCLK_DISABLE);
3856 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
3857 CLOCK_CTRL_PWRDOWN_PLL133, 40);
3858 } else if (tg3_flag(tp, 5780_CLASS) ||
3859 tg3_flag(tp, CPMU_PRESENT) ||
3860 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3862 } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
3863 u32 newbits1, newbits2;
3865 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3866 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3867 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
3868 CLOCK_CTRL_TXCLK_DISABLE |
3870 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3871 } else if (tg3_flag(tp, 5705_PLUS)) {
3872 newbits1 = CLOCK_CTRL_625_CORE;
3873 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
3875 newbits1 = CLOCK_CTRL_ALTCLK;
3876 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3879 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
3882 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
3885 if (!tg3_flag(tp, 5705_PLUS)) {
3888 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3889 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3890 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
3891 CLOCK_CTRL_TXCLK_DISABLE |
3892 CLOCK_CTRL_44MHZ_CORE);
3894 newbits3 = CLOCK_CTRL_44MHZ_CORE;
3897 tw32_wait_f(TG3PCI_CLOCK_CTRL,
3898 tp->pci_clock_ctrl | newbits3, 40);
3902 if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
3903 tg3_power_down_phy(tp, do_low_power);
3905 tg3_frob_aux_power(tp, true);
3907 /* Workaround for unstable PLL clock */
3908 if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
3909 (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
3910 u32 val = tr32(0x7d00);
3912 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
3914 if (!tg3_flag(tp, ENABLE_ASF)) {
3917 err = tg3_nvram_lock(tp);
3918 tg3_halt_cpu(tp, RX_CPU_BASE);
3920 tg3_nvram_unlock(tp);
3924 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
3929 static void tg3_power_down(struct tg3 *tp)
3931 tg3_power_down_prepare(tp);
3933 pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
3934 pci_set_power_state(tp->pdev, PCI_D3hot);
3937 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
3939 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
3940 case MII_TG3_AUX_STAT_10HALF:
3942 *duplex = DUPLEX_HALF;
3945 case MII_TG3_AUX_STAT_10FULL:
3947 *duplex = DUPLEX_FULL;
3950 case MII_TG3_AUX_STAT_100HALF:
3952 *duplex = DUPLEX_HALF;
3955 case MII_TG3_AUX_STAT_100FULL:
3957 *duplex = DUPLEX_FULL;
3960 case MII_TG3_AUX_STAT_1000HALF:
3961 *speed = SPEED_1000;
3962 *duplex = DUPLEX_HALF;
3965 case MII_TG3_AUX_STAT_1000FULL:
3966 *speed = SPEED_1000;
3967 *duplex = DUPLEX_FULL;
3971 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3972 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
3974 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
3978 *speed = SPEED_UNKNOWN;
3979 *duplex = DUPLEX_UNKNOWN;
3984 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
3989 new_adv = ADVERTISE_CSMA;
3990 new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
3991 new_adv |= mii_advertise_flowctrl(flowctrl);
3993 err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
3997 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3998 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
4000 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
4001 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
4002 new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4004 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
4009 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4012 tw32(TG3_CPMU_EEE_MODE,
4013 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
4015 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
4020 /* Advertise 100-BaseTX EEE ability */
4021 if (advertise & ADVERTISED_100baseT_Full)
4022 val |= MDIO_AN_EEE_ADV_100TX;
4023 /* Advertise 1000-BaseT EEE ability */
4024 if (advertise & ADVERTISED_1000baseT_Full)
4025 val |= MDIO_AN_EEE_ADV_1000T;
4026 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
4030 switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
4032 case ASIC_REV_57765:
4033 case ASIC_REV_57766:
4035 /* If we advertised any eee advertisements above... */
4037 val = MII_TG3_DSP_TAP26_ALNOKO |
4038 MII_TG3_DSP_TAP26_RMRXSTO |
4039 MII_TG3_DSP_TAP26_OPCSINPT;
4040 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
4044 if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4045 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4046 MII_TG3_DSP_CH34TP2_HIBW01);
4049 err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
4058 static void tg3_phy_copper_begin(struct tg3 *tp)
4060 if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4061 (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4064 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
4065 adv = ADVERTISED_10baseT_Half |
4066 ADVERTISED_10baseT_Full;
4067 if (tg3_flag(tp, WOL_SPEED_100MB))
4068 adv |= ADVERTISED_100baseT_Half |
4069 ADVERTISED_100baseT_Full;
4071 fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4073 adv = tp->link_config.advertising;
4074 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4075 adv &= ~(ADVERTISED_1000baseT_Half |
4076 ADVERTISED_1000baseT_Full);
4078 fc = tp->link_config.flowctrl;
4081 tg3_phy_autoneg_cfg(tp, adv, fc);
4083 tg3_writephy(tp, MII_BMCR,
4084 BMCR_ANENABLE | BMCR_ANRESTART);
4087 u32 bmcr, orig_bmcr;
4089 tp->link_config.active_speed = tp->link_config.speed;
4090 tp->link_config.active_duplex = tp->link_config.duplex;
4093 switch (tp->link_config.speed) {
4099 bmcr |= BMCR_SPEED100;
4103 bmcr |= BMCR_SPEED1000;
4107 if (tp->link_config.duplex == DUPLEX_FULL)
4108 bmcr |= BMCR_FULLDPLX;
4110 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4111 (bmcr != orig_bmcr)) {
4112 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4113 for (i = 0; i < 1500; i++) {
4117 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4118 tg3_readphy(tp, MII_BMSR, &tmp))
4120 if (!(tmp & BMSR_LSTATUS)) {
4125 tg3_writephy(tp, MII_BMCR, bmcr);
4131 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4135 /* Turn off tap power management. */
4136 /* Set Extended packet length bit */
4137 err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4139 err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4140 err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4141 err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4142 err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4143 err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4150 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4152 u32 advmsk, tgtadv, advertising;
4154 advertising = tp->link_config.advertising;
4155 tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4157 advmsk = ADVERTISE_ALL;
4158 if (tp->link_config.active_duplex == DUPLEX_FULL) {
4159 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4160 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4163 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4166 if ((*lcladv & advmsk) != tgtadv)
4169 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4172 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4174 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4178 (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
4179 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)) {
4180 tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4181 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4182 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4184 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4187 if (tg3_ctrl != tgtadv)
4194 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4198 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4201 if (tg3_readphy(tp, MII_STAT1000, &val))
4204 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4207 if (tg3_readphy(tp, MII_LPA, rmtadv))
4210 lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4211 tp->link_config.rmt_adv = lpeth;
4216 static bool tg3_test_and_report_link_chg(struct tg3 *tp, int curr_link_up)
4218 if (curr_link_up != tp->link_up) {
4222 tg3_carrier_off(tp);
4223 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4224 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4227 tg3_link_report(tp);
4234 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
4236 int current_link_up;
4238 u32 lcl_adv, rmt_adv;
4246 (MAC_STATUS_SYNC_CHANGED |
4247 MAC_STATUS_CFG_CHANGED |
4248 MAC_STATUS_MI_COMPLETION |
4249 MAC_STATUS_LNKSTATE_CHANGED));
4252 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4254 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4258 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4260 /* Some third-party PHYs need to be reset on link going
4263 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
4264 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
4265 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
4267 tg3_readphy(tp, MII_BMSR, &bmsr);
4268 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4269 !(bmsr & BMSR_LSTATUS))
4275 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4276 tg3_readphy(tp, MII_BMSR, &bmsr);
4277 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4278 !tg3_flag(tp, INIT_COMPLETE))
4281 if (!(bmsr & BMSR_LSTATUS)) {
4282 err = tg3_init_5401phy_dsp(tp);
4286 tg3_readphy(tp, MII_BMSR, &bmsr);
4287 for (i = 0; i < 1000; i++) {
4289 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4290 (bmsr & BMSR_LSTATUS)) {
4296 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4297 TG3_PHY_REV_BCM5401_B0 &&
4298 !(bmsr & BMSR_LSTATUS) &&
4299 tp->link_config.active_speed == SPEED_1000) {
4300 err = tg3_phy_reset(tp);
4302 err = tg3_init_5401phy_dsp(tp);
4307 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
4308 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
4309 /* 5701 {A0,B0} CRC bug workaround */
4310 tg3_writephy(tp, 0x15, 0x0a75);
4311 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4312 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4313 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4316 /* Clear pending interrupts... */
4317 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4318 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4320 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4321 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4322 else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4323 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4325 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
4326 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
4327 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4328 tg3_writephy(tp, MII_TG3_EXT_CTRL,
4329 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4331 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4334 current_link_up = 0;
4335 current_speed = SPEED_UNKNOWN;
4336 current_duplex = DUPLEX_UNKNOWN;
4337 tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4338 tp->link_config.rmt_adv = 0;
4340 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4341 err = tg3_phy_auxctl_read(tp,
4342 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4344 if (!err && !(val & (1 << 10))) {
4345 tg3_phy_auxctl_write(tp,
4346 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4353 for (i = 0; i < 100; i++) {
4354 tg3_readphy(tp, MII_BMSR, &bmsr);
4355 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4356 (bmsr & BMSR_LSTATUS))
4361 if (bmsr & BMSR_LSTATUS) {
4364 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4365 for (i = 0; i < 2000; i++) {
4367 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4372 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4377 for (i = 0; i < 200; i++) {
4378 tg3_readphy(tp, MII_BMCR, &bmcr);
4379 if (tg3_readphy(tp, MII_BMCR, &bmcr))
4381 if (bmcr && bmcr != 0x7fff)
4389 tp->link_config.active_speed = current_speed;
4390 tp->link_config.active_duplex = current_duplex;
4392 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4393 if ((bmcr & BMCR_ANENABLE) &&
4394 tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4395 tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4396 current_link_up = 1;
4398 if (!(bmcr & BMCR_ANENABLE) &&
4399 tp->link_config.speed == current_speed &&
4400 tp->link_config.duplex == current_duplex &&
4401 tp->link_config.flowctrl ==
4402 tp->link_config.active_flowctrl) {
4403 current_link_up = 1;
4407 if (current_link_up == 1 &&
4408 tp->link_config.active_duplex == DUPLEX_FULL) {
4411 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4412 reg = MII_TG3_FET_GEN_STAT;
4413 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4415 reg = MII_TG3_EXT_STAT;
4416 bit = MII_TG3_EXT_STAT_MDIX;
4419 if (!tg3_readphy(tp, reg, &val) && (val & bit))
4420 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4422 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4427 if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4428 tg3_phy_copper_begin(tp);
4430 tg3_readphy(tp, MII_BMSR, &bmsr);
4431 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4432 (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4433 current_link_up = 1;
4436 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4437 if (current_link_up == 1) {
4438 if (tp->link_config.active_speed == SPEED_100 ||
4439 tp->link_config.active_speed == SPEED_10)
4440 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4442 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4443 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4444 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4446 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4448 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4449 if (tp->link_config.active_duplex == DUPLEX_HALF)
4450 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4452 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
4453 if (current_link_up == 1 &&
4454 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
4455 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
4457 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
4460 /* ??? Without this setting Netgear GA302T PHY does not
4461 * ??? send/receive packets...
4463 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
4464 tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
4465 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
4466 tw32_f(MAC_MI_MODE, tp->mi_mode);
4470 tw32_f(MAC_MODE, tp->mac_mode);
4473 tg3_phy_eee_adjust(tp, current_link_up);
4475 if (tg3_flag(tp, USE_LINKCHG_REG)) {
4476 /* Polled via timer. */
4477 tw32_f(MAC_EVENT, 0);
4479 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4483 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
4484 current_link_up == 1 &&
4485 tp->link_config.active_speed == SPEED_1000 &&
4486 (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
4489 (MAC_STATUS_SYNC_CHANGED |
4490 MAC_STATUS_CFG_CHANGED));
4493 NIC_SRAM_FIRMWARE_MBOX,
4494 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
4497 /* Prevent send BD corruption. */
4498 if (tg3_flag(tp, CLKREQ_BUG)) {
4499 if (tp->link_config.active_speed == SPEED_100 ||
4500 tp->link_config.active_speed == SPEED_10)
4501 pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
4502 PCI_EXP_LNKCTL_CLKREQ_EN);
4504 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4505 PCI_EXP_LNKCTL_CLKREQ_EN);
4508 tg3_test_and_report_link_chg(tp, current_link_up);
4513 struct tg3_fiber_aneginfo {
4515 #define ANEG_STATE_UNKNOWN 0
4516 #define ANEG_STATE_AN_ENABLE 1
4517 #define ANEG_STATE_RESTART_INIT 2
4518 #define ANEG_STATE_RESTART 3
4519 #define ANEG_STATE_DISABLE_LINK_OK 4
4520 #define ANEG_STATE_ABILITY_DETECT_INIT 5
4521 #define ANEG_STATE_ABILITY_DETECT 6
4522 #define ANEG_STATE_ACK_DETECT_INIT 7
4523 #define ANEG_STATE_ACK_DETECT 8
4524 #define ANEG_STATE_COMPLETE_ACK_INIT 9
4525 #define ANEG_STATE_COMPLETE_ACK 10
4526 #define ANEG_STATE_IDLE_DETECT_INIT 11
4527 #define ANEG_STATE_IDLE_DETECT 12
4528 #define ANEG_STATE_LINK_OK 13
4529 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
4530 #define ANEG_STATE_NEXT_PAGE_WAIT 15
4533 #define MR_AN_ENABLE 0x00000001
4534 #define MR_RESTART_AN 0x00000002
4535 #define MR_AN_COMPLETE 0x00000004
4536 #define MR_PAGE_RX 0x00000008
4537 #define MR_NP_LOADED 0x00000010
4538 #define MR_TOGGLE_TX 0x00000020
4539 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
4540 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
4541 #define MR_LP_ADV_SYM_PAUSE 0x00000100
4542 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
4543 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
4544 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
4545 #define MR_LP_ADV_NEXT_PAGE 0x00001000
4546 #define MR_TOGGLE_RX 0x00002000
4547 #define MR_NP_RX 0x00004000
4549 #define MR_LINK_OK 0x80000000
4551 unsigned long link_time, cur_time;
4553 u32 ability_match_cfg;
4554 int ability_match_count;
4556 char ability_match, idle_match, ack_match;
4558 u32 txconfig, rxconfig;
4559 #define ANEG_CFG_NP 0x00000080
4560 #define ANEG_CFG_ACK 0x00000040
4561 #define ANEG_CFG_RF2 0x00000020
4562 #define ANEG_CFG_RF1 0x00000010
4563 #define ANEG_CFG_PS2 0x00000001
4564 #define ANEG_CFG_PS1 0x00008000
4565 #define ANEG_CFG_HD 0x00004000
4566 #define ANEG_CFG_FD 0x00002000
4567 #define ANEG_CFG_INVAL 0x00001f06
4572 #define ANEG_TIMER_ENAB 2
4573 #define ANEG_FAILED -1
4575 #define ANEG_STATE_SETTLE_TIME 10000
4577 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
4578 struct tg3_fiber_aneginfo *ap)
4581 unsigned long delta;
4585 if (ap->state == ANEG_STATE_UNKNOWN) {
4589 ap->ability_match_cfg = 0;
4590 ap->ability_match_count = 0;
4591 ap->ability_match = 0;
4597 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
4598 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
4600 if (rx_cfg_reg != ap->ability_match_cfg) {
4601 ap->ability_match_cfg = rx_cfg_reg;
4602 ap->ability_match = 0;
4603 ap->ability_match_count = 0;
4605 if (++ap->ability_match_count > 1) {
4606 ap->ability_match = 1;
4607 ap->ability_match_cfg = rx_cfg_reg;
4610 if (rx_cfg_reg & ANEG_CFG_ACK)
4618 ap->ability_match_cfg = 0;
4619 ap->ability_match_count = 0;
4620 ap->ability_match = 0;
4626 ap->rxconfig = rx_cfg_reg;
4629 switch (ap->state) {
4630 case ANEG_STATE_UNKNOWN:
4631 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
4632 ap->state = ANEG_STATE_AN_ENABLE;
4635 case ANEG_STATE_AN_ENABLE:
4636 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
4637 if (ap->flags & MR_AN_ENABLE) {
4640 ap->ability_match_cfg = 0;
4641 ap->ability_match_count = 0;
4642 ap->ability_match = 0;
4646 ap->state = ANEG_STATE_RESTART_INIT;
4648 ap->state = ANEG_STATE_DISABLE_LINK_OK;
4652 case ANEG_STATE_RESTART_INIT:
4653 ap->link_time = ap->cur_time;
4654 ap->flags &= ~(MR_NP_LOADED);
4656 tw32(MAC_TX_AUTO_NEG, 0);
4657 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4658 tw32_f(MAC_MODE, tp->mac_mode);
4661 ret = ANEG_TIMER_ENAB;
4662 ap->state = ANEG_STATE_RESTART;
4665 case ANEG_STATE_RESTART:
4666 delta = ap->cur_time - ap->link_time;
4667 if (delta > ANEG_STATE_SETTLE_TIME)
4668 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
4670 ret = ANEG_TIMER_ENAB;
4673 case ANEG_STATE_DISABLE_LINK_OK:
4677 case ANEG_STATE_ABILITY_DETECT_INIT:
4678 ap->flags &= ~(MR_TOGGLE_TX);
4679 ap->txconfig = ANEG_CFG_FD;
4680 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4681 if (flowctrl & ADVERTISE_1000XPAUSE)
4682 ap->txconfig |= ANEG_CFG_PS1;
4683 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4684 ap->txconfig |= ANEG_CFG_PS2;
4685 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4686 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4687 tw32_f(MAC_MODE, tp->mac_mode);
4690 ap->state = ANEG_STATE_ABILITY_DETECT;
4693 case ANEG_STATE_ABILITY_DETECT:
4694 if (ap->ability_match != 0 && ap->rxconfig != 0)
4695 ap->state = ANEG_STATE_ACK_DETECT_INIT;
4698 case ANEG_STATE_ACK_DETECT_INIT:
4699 ap->txconfig |= ANEG_CFG_ACK;
4700 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4701 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4702 tw32_f(MAC_MODE, tp->mac_mode);
4705 ap->state = ANEG_STATE_ACK_DETECT;
4708 case ANEG_STATE_ACK_DETECT:
4709 if (ap->ack_match != 0) {
4710 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
4711 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
4712 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
4714 ap->state = ANEG_STATE_AN_ENABLE;
4716 } else if (ap->ability_match != 0 &&
4717 ap->rxconfig == 0) {
4718 ap->state = ANEG_STATE_AN_ENABLE;
4722 case ANEG_STATE_COMPLETE_ACK_INIT:
4723 if (ap->rxconfig & ANEG_CFG_INVAL) {
4727 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
4728 MR_LP_ADV_HALF_DUPLEX |
4729 MR_LP_ADV_SYM_PAUSE |
4730 MR_LP_ADV_ASYM_PAUSE |
4731 MR_LP_ADV_REMOTE_FAULT1 |
4732 MR_LP_ADV_REMOTE_FAULT2 |
4733 MR_LP_ADV_NEXT_PAGE |
4736 if (ap->rxconfig & ANEG_CFG_FD)
4737 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
4738 if (ap->rxconfig & ANEG_CFG_HD)
4739 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
4740 if (ap->rxconfig & ANEG_CFG_PS1)
4741 ap->flags |= MR_LP_ADV_SYM_PAUSE;
4742 if (ap->rxconfig & ANEG_CFG_PS2)
4743 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
4744 if (ap->rxconfig & ANEG_CFG_RF1)
4745 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
4746 if (ap->rxconfig & ANEG_CFG_RF2)
4747 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
4748 if (ap->rxconfig & ANEG_CFG_NP)
4749 ap->flags |= MR_LP_ADV_NEXT_PAGE;
4751 ap->link_time = ap->cur_time;
4753 ap->flags ^= (MR_TOGGLE_TX);
4754 if (ap->rxconfig & 0x0008)
4755 ap->flags |= MR_TOGGLE_RX;
4756 if (ap->rxconfig & ANEG_CFG_NP)
4757 ap->flags |= MR_NP_RX;
4758 ap->flags |= MR_PAGE_RX;
4760 ap->state = ANEG_STATE_COMPLETE_ACK;
4761 ret = ANEG_TIMER_ENAB;
4764 case ANEG_STATE_COMPLETE_ACK:
4765 if (ap->ability_match != 0 &&
4766 ap->rxconfig == 0) {
4767 ap->state = ANEG_STATE_AN_ENABLE;
4770 delta = ap->cur_time - ap->link_time;
4771 if (delta > ANEG_STATE_SETTLE_TIME) {
4772 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
4773 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4775 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
4776 !(ap->flags & MR_NP_RX)) {
4777 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4785 case ANEG_STATE_IDLE_DETECT_INIT:
4786 ap->link_time = ap->cur_time;
4787 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4788 tw32_f(MAC_MODE, tp->mac_mode);
4791 ap->state = ANEG_STATE_IDLE_DETECT;
4792 ret = ANEG_TIMER_ENAB;
4795 case ANEG_STATE_IDLE_DETECT:
4796 if (ap->ability_match != 0 &&
4797 ap->rxconfig == 0) {
4798 ap->state = ANEG_STATE_AN_ENABLE;
4801 delta = ap->cur_time - ap->link_time;
4802 if (delta > ANEG_STATE_SETTLE_TIME) {
4803 /* XXX another gem from the Broadcom driver :( */
4804 ap->state = ANEG_STATE_LINK_OK;
4808 case ANEG_STATE_LINK_OK:
4809 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
4813 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
4814 /* ??? unimplemented */
4817 case ANEG_STATE_NEXT_PAGE_WAIT:
4818 /* ??? unimplemented */
4829 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
4832 struct tg3_fiber_aneginfo aninfo;
4833 int status = ANEG_FAILED;
4837 tw32_f(MAC_TX_AUTO_NEG, 0);
4839 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
4840 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
4843 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
4846 memset(&aninfo, 0, sizeof(aninfo));
4847 aninfo.flags |= MR_AN_ENABLE;
4848 aninfo.state = ANEG_STATE_UNKNOWN;
4849 aninfo.cur_time = 0;
4851 while (++tick < 195000) {
4852 status = tg3_fiber_aneg_smachine(tp, &aninfo);
4853 if (status == ANEG_DONE || status == ANEG_FAILED)
4859 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4860 tw32_f(MAC_MODE, tp->mac_mode);
4863 *txflags = aninfo.txconfig;
4864 *rxflags = aninfo.flags;
4866 if (status == ANEG_DONE &&
4867 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
4868 MR_LP_ADV_FULL_DUPLEX)))
4874 static void tg3_init_bcm8002(struct tg3 *tp)
4876 u32 mac_status = tr32(MAC_STATUS);
4879 /* Reset when initting first time or we have a link. */
4880 if (tg3_flag(tp, INIT_COMPLETE) &&
4881 !(mac_status & MAC_STATUS_PCS_SYNCED))
4884 /* Set PLL lock range. */
4885 tg3_writephy(tp, 0x16, 0x8007);
4888 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
4890 /* Wait for reset to complete. */
4891 /* XXX schedule_timeout() ... */
4892 for (i = 0; i < 500; i++)
4895 /* Config mode; select PMA/Ch 1 regs. */
4896 tg3_writephy(tp, 0x10, 0x8411);
4898 /* Enable auto-lock and comdet, select txclk for tx. */
4899 tg3_writephy(tp, 0x11, 0x0a10);
4901 tg3_writephy(tp, 0x18, 0x00a0);
4902 tg3_writephy(tp, 0x16, 0x41ff);
4904 /* Assert and deassert POR. */
4905 tg3_writephy(tp, 0x13, 0x0400);
4907 tg3_writephy(tp, 0x13, 0x0000);
4909 tg3_writephy(tp, 0x11, 0x0a50);
4911 tg3_writephy(tp, 0x11, 0x0a10);
4913 /* Wait for signal to stabilize */
4914 /* XXX schedule_timeout() ... */
4915 for (i = 0; i < 15000; i++)
4918 /* Deselect the channel register so we can read the PHYID
4921 tg3_writephy(tp, 0x10, 0x8011);
4924 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
4927 u32 sg_dig_ctrl, sg_dig_status;
4928 u32 serdes_cfg, expected_sg_dig_ctrl;
4929 int workaround, port_a;
4930 int current_link_up;
4933 expected_sg_dig_ctrl = 0;
4936 current_link_up = 0;
4938 if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
4939 tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
4941 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
4944 /* preserve bits 0-11,13,14 for signal pre-emphasis */
4945 /* preserve bits 20-23 for voltage regulator */
4946 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
4949 sg_dig_ctrl = tr32(SG_DIG_CTRL);
4951 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
4952 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
4954 u32 val = serdes_cfg;
4960 tw32_f(MAC_SERDES_CFG, val);
4963 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4965 if (mac_status & MAC_STATUS_PCS_SYNCED) {
4966 tg3_setup_flow_control(tp, 0, 0);
4967 current_link_up = 1;
4972 /* Want auto-negotiation. */
4973 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
4975 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4976 if (flowctrl & ADVERTISE_1000XPAUSE)
4977 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
4978 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4979 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
4981 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
4982 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
4983 tp->serdes_counter &&
4984 ((mac_status & (MAC_STATUS_PCS_SYNCED |
4985 MAC_STATUS_RCVD_CFG)) ==
4986 MAC_STATUS_PCS_SYNCED)) {
4987 tp->serdes_counter--;
4988 current_link_up = 1;
4993 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
4994 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
4996 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
4998 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4999 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5000 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
5001 MAC_STATUS_SIGNAL_DET)) {
5002 sg_dig_status = tr32(SG_DIG_STATUS);
5003 mac_status = tr32(MAC_STATUS);
5005 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
5006 (mac_status & MAC_STATUS_PCS_SYNCED)) {
5007 u32 local_adv = 0, remote_adv = 0;
5009 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
5010 local_adv |= ADVERTISE_1000XPAUSE;
5011 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
5012 local_adv |= ADVERTISE_1000XPSE_ASYM;
5014 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
5015 remote_adv |= LPA_1000XPAUSE;
5016 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
5017 remote_adv |= LPA_1000XPAUSE_ASYM;
5019 tp->link_config.rmt_adv =
5020 mii_adv_to_ethtool_adv_x(remote_adv);
5022 tg3_setup_flow_control(tp, local_adv, remote_adv);
5023 current_link_up = 1;
5024 tp->serdes_counter = 0;
5025 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5026 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
5027 if (tp->serdes_counter)
5028 tp->serdes_counter--;
5031 u32 val = serdes_cfg;
5038 tw32_f(MAC_SERDES_CFG, val);
5041 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5044 /* Link parallel detection - link is up */
5045 /* only if we have PCS_SYNC and not */
5046 /* receiving config code words */
5047 mac_status = tr32(MAC_STATUS);
5048 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
5049 !(mac_status & MAC_STATUS_RCVD_CFG)) {
5050 tg3_setup_flow_control(tp, 0, 0);
5051 current_link_up = 1;
5053 TG3_PHYFLG_PARALLEL_DETECT;
5054 tp->serdes_counter =
5055 SERDES_PARALLEL_DET_TIMEOUT;
5057 goto restart_autoneg;
5061 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5062 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5066 return current_link_up;
5069 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5071 int current_link_up = 0;
5073 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5076 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5077 u32 txflags, rxflags;
5080 if (fiber_autoneg(tp, &txflags, &rxflags)) {
5081 u32 local_adv = 0, remote_adv = 0;
5083 if (txflags & ANEG_CFG_PS1)
5084 local_adv |= ADVERTISE_1000XPAUSE;
5085 if (txflags & ANEG_CFG_PS2)
5086 local_adv |= ADVERTISE_1000XPSE_ASYM;
5088 if (rxflags & MR_LP_ADV_SYM_PAUSE)
5089 remote_adv |= LPA_1000XPAUSE;
5090 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5091 remote_adv |= LPA_1000XPAUSE_ASYM;
5093 tp->link_config.rmt_adv =
5094 mii_adv_to_ethtool_adv_x(remote_adv);
5096 tg3_setup_flow_control(tp, local_adv, remote_adv);
5098 current_link_up = 1;
5100 for (i = 0; i < 30; i++) {
5103 (MAC_STATUS_SYNC_CHANGED |
5104 MAC_STATUS_CFG_CHANGED));
5106 if ((tr32(MAC_STATUS) &
5107 (MAC_STATUS_SYNC_CHANGED |
5108 MAC_STATUS_CFG_CHANGED)) == 0)
5112 mac_status = tr32(MAC_STATUS);
5113 if (current_link_up == 0 &&
5114 (mac_status & MAC_STATUS_PCS_SYNCED) &&
5115 !(mac_status & MAC_STATUS_RCVD_CFG))
5116 current_link_up = 1;
5118 tg3_setup_flow_control(tp, 0, 0);
5120 /* Forcing 1000FD link up. */
5121 current_link_up = 1;
5123 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5126 tw32_f(MAC_MODE, tp->mac_mode);
5131 return current_link_up;
5134 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
5137 u16 orig_active_speed;
5138 u8 orig_active_duplex;
5140 int current_link_up;
5143 orig_pause_cfg = tp->link_config.active_flowctrl;
5144 orig_active_speed = tp->link_config.active_speed;
5145 orig_active_duplex = tp->link_config.active_duplex;
5147 if (!tg3_flag(tp, HW_AUTONEG) &&
5149 tg3_flag(tp, INIT_COMPLETE)) {
5150 mac_status = tr32(MAC_STATUS);
5151 mac_status &= (MAC_STATUS_PCS_SYNCED |
5152 MAC_STATUS_SIGNAL_DET |
5153 MAC_STATUS_CFG_CHANGED |
5154 MAC_STATUS_RCVD_CFG);
5155 if (mac_status == (MAC_STATUS_PCS_SYNCED |
5156 MAC_STATUS_SIGNAL_DET)) {
5157 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5158 MAC_STATUS_CFG_CHANGED));
5163 tw32_f(MAC_TX_AUTO_NEG, 0);
5165 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5166 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5167 tw32_f(MAC_MODE, tp->mac_mode);
5170 if (tp->phy_id == TG3_PHY_ID_BCM8002)
5171 tg3_init_bcm8002(tp);
5173 /* Enable link change event even when serdes polling. */
5174 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5177 current_link_up = 0;
5178 tp->link_config.rmt_adv = 0;
5179 mac_status = tr32(MAC_STATUS);
5181 if (tg3_flag(tp, HW_AUTONEG))
5182 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5184 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5186 tp->napi[0].hw_status->status =
5187 (SD_STATUS_UPDATED |
5188 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5190 for (i = 0; i < 100; i++) {
5191 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5192 MAC_STATUS_CFG_CHANGED));
5194 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5195 MAC_STATUS_CFG_CHANGED |
5196 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5200 mac_status = tr32(MAC_STATUS);
5201 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5202 current_link_up = 0;
5203 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5204 tp->serdes_counter == 0) {
5205 tw32_f(MAC_MODE, (tp->mac_mode |
5206 MAC_MODE_SEND_CONFIGS));
5208 tw32_f(MAC_MODE, tp->mac_mode);
5212 if (current_link_up == 1) {
5213 tp->link_config.active_speed = SPEED_1000;
5214 tp->link_config.active_duplex = DUPLEX_FULL;
5215 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5216 LED_CTRL_LNKLED_OVERRIDE |
5217 LED_CTRL_1000MBPS_ON));
5219 tp->link_config.active_speed = SPEED_UNKNOWN;
5220 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5221 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5222 LED_CTRL_LNKLED_OVERRIDE |
5223 LED_CTRL_TRAFFIC_OVERRIDE));
5226 if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
5227 u32 now_pause_cfg = tp->link_config.active_flowctrl;
5228 if (orig_pause_cfg != now_pause_cfg ||
5229 orig_active_speed != tp->link_config.active_speed ||
5230 orig_active_duplex != tp->link_config.active_duplex)
5231 tg3_link_report(tp);
5237 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
5239 int current_link_up, err = 0;
5243 u32 local_adv, remote_adv;
5245 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5246 tw32_f(MAC_MODE, tp->mac_mode);
5252 (MAC_STATUS_SYNC_CHANGED |
5253 MAC_STATUS_CFG_CHANGED |
5254 MAC_STATUS_MI_COMPLETION |
5255 MAC_STATUS_LNKSTATE_CHANGED));
5261 current_link_up = 0;
5262 current_speed = SPEED_UNKNOWN;
5263 current_duplex = DUPLEX_UNKNOWN;
5264 tp->link_config.rmt_adv = 0;
5266 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5267 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5268 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
5269 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5270 bmsr |= BMSR_LSTATUS;
5272 bmsr &= ~BMSR_LSTATUS;
5275 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5277 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5278 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5279 /* do nothing, just check for link up at the end */
5280 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5283 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5284 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5285 ADVERTISE_1000XPAUSE |
5286 ADVERTISE_1000XPSE_ASYM |
5289 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5290 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5292 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5293 tg3_writephy(tp, MII_ADVERTISE, newadv);
5294 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5295 tg3_writephy(tp, MII_BMCR, bmcr);
5297 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5298 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5299 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5306 bmcr &= ~BMCR_SPEED1000;
5307 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5309 if (tp->link_config.duplex == DUPLEX_FULL)
5310 new_bmcr |= BMCR_FULLDPLX;
5312 if (new_bmcr != bmcr) {
5313 /* BMCR_SPEED1000 is a reserved bit that needs
5314 * to be set on write.
5316 new_bmcr |= BMCR_SPEED1000;
5318 /* Force a linkdown */
5322 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5323 adv &= ~(ADVERTISE_1000XFULL |
5324 ADVERTISE_1000XHALF |
5326 tg3_writephy(tp, MII_ADVERTISE, adv);
5327 tg3_writephy(tp, MII_BMCR, bmcr |
5331 tg3_carrier_off(tp);
5333 tg3_writephy(tp, MII_BMCR, new_bmcr);
5335 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5336 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5337 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
5339 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5340 bmsr |= BMSR_LSTATUS;
5342 bmsr &= ~BMSR_LSTATUS;
5344 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5348 if (bmsr & BMSR_LSTATUS) {
5349 current_speed = SPEED_1000;
5350 current_link_up = 1;
5351 if (bmcr & BMCR_FULLDPLX)
5352 current_duplex = DUPLEX_FULL;
5354 current_duplex = DUPLEX_HALF;
5359 if (bmcr & BMCR_ANENABLE) {
5362 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5363 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5364 common = local_adv & remote_adv;
5365 if (common & (ADVERTISE_1000XHALF |
5366 ADVERTISE_1000XFULL)) {
5367 if (common & ADVERTISE_1000XFULL)
5368 current_duplex = DUPLEX_FULL;
5370 current_duplex = DUPLEX_HALF;
5372 tp->link_config.rmt_adv =
5373 mii_adv_to_ethtool_adv_x(remote_adv);
5374 } else if (!tg3_flag(tp, 5780_CLASS)) {
5375 /* Link is up via parallel detect */
5377 current_link_up = 0;
5382 if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
5383 tg3_setup_flow_control(tp, local_adv, remote_adv);
5385 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5386 if (tp->link_config.active_duplex == DUPLEX_HALF)
5387 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5389 tw32_f(MAC_MODE, tp->mac_mode);
5392 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5394 tp->link_config.active_speed = current_speed;
5395 tp->link_config.active_duplex = current_duplex;
5397 tg3_test_and_report_link_chg(tp, current_link_up);
5401 static void tg3_serdes_parallel_detect(struct tg3 *tp)
5403 if (tp->serdes_counter) {
5404 /* Give autoneg time to complete. */
5405 tp->serdes_counter--;
5410 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
5413 tg3_readphy(tp, MII_BMCR, &bmcr);
5414 if (bmcr & BMCR_ANENABLE) {
5417 /* Select shadow register 0x1f */
5418 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
5419 tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
5421 /* Select expansion interrupt status register */
5422 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5423 MII_TG3_DSP_EXP1_INT_STAT);
5424 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5425 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5427 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
5428 /* We have signal detect and not receiving
5429 * config code words, link is up by parallel
5433 bmcr &= ~BMCR_ANENABLE;
5434 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5435 tg3_writephy(tp, MII_BMCR, bmcr);
5436 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
5439 } else if (tp->link_up &&
5440 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
5441 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5444 /* Select expansion interrupt status register */
5445 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5446 MII_TG3_DSP_EXP1_INT_STAT);
5447 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5451 /* Config code words received, turn on autoneg. */
5452 tg3_readphy(tp, MII_BMCR, &bmcr);
5453 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
5455 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5461 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
5466 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
5467 err = tg3_setup_fiber_phy(tp, force_reset);
5468 else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
5469 err = tg3_setup_fiber_mii_phy(tp, force_reset);
5471 err = tg3_setup_copper_phy(tp, force_reset);
5473 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
5476 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
5477 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
5479 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
5484 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
5485 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
5486 tw32(GRC_MISC_CFG, val);
5489 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5490 (6 << TX_LENGTHS_IPG_SHIFT);
5491 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
5492 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
5493 val |= tr32(MAC_TX_LENGTHS) &
5494 (TX_LENGTHS_JMB_FRM_LEN_MSK |
5495 TX_LENGTHS_CNT_DWN_VAL_MSK);
5497 if (tp->link_config.active_speed == SPEED_1000 &&
5498 tp->link_config.active_duplex == DUPLEX_HALF)
5499 tw32(MAC_TX_LENGTHS, val |
5500 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
5502 tw32(MAC_TX_LENGTHS, val |
5503 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5505 if (!tg3_flag(tp, 5705_PLUS)) {
5507 tw32(HOSTCC_STAT_COAL_TICKS,
5508 tp->coal.stats_block_coalesce_usecs);
5510 tw32(HOSTCC_STAT_COAL_TICKS, 0);
5514 if (tg3_flag(tp, ASPM_WORKAROUND)) {
5515 val = tr32(PCIE_PWR_MGMT_THRESH);
5517 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
5520 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
5521 tw32(PCIE_PWR_MGMT_THRESH, val);
5527 /* tp->lock must be held */
5528 static u64 tg3_refclk_read(struct tg3 *tp)
5530 u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
5531 return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
5534 /* tp->lock must be held */
5535 static void tg3_refclk_write(struct tg3 *tp, u64 newval)
5537 tw32(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_STOP);
5538 tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
5539 tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
5540 tw32_f(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_RESUME);
5543 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
5544 static inline void tg3_full_unlock(struct tg3 *tp);
5545 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
5547 struct tg3 *tp = netdev_priv(dev);
5549 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
5550 SOF_TIMESTAMPING_RX_SOFTWARE |
5551 SOF_TIMESTAMPING_SOFTWARE |
5552 SOF_TIMESTAMPING_TX_HARDWARE |
5553 SOF_TIMESTAMPING_RX_HARDWARE |
5554 SOF_TIMESTAMPING_RAW_HARDWARE;
5557 info->phc_index = ptp_clock_index(tp->ptp_clock);
5559 info->phc_index = -1;
5561 info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
5563 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
5564 (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
5565 (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
5566 (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
5570 static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
5572 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
5573 bool neg_adj = false;
5581 /* Frequency adjustment is performed using hardware with a 24 bit
5582 * accumulator and a programmable correction value. On each clk, the
5583 * correction value gets added to the accumulator and when it
5584 * overflows, the time counter is incremented/decremented.
5586 * So conversion from ppb to correction value is
5587 * ppb * (1 << 24) / 1000000000
5589 correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
5590 TG3_EAV_REF_CLK_CORRECT_MASK;
5592 tg3_full_lock(tp, 0);
5595 tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
5596 TG3_EAV_REF_CLK_CORRECT_EN |
5597 (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
5599 tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
5601 tg3_full_unlock(tp);
5606 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
5608 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
5610 tg3_full_lock(tp, 0);
5611 tp->ptp_adjust += delta;
5612 tg3_full_unlock(tp);
5617 static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
5621 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
5623 tg3_full_lock(tp, 0);
5624 ns = tg3_refclk_read(tp);
5625 ns += tp->ptp_adjust;
5626 tg3_full_unlock(tp);
5628 ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
5629 ts->tv_nsec = remainder;
5634 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
5635 const struct timespec *ts)
5638 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
5640 ns = timespec_to_ns(ts);
5642 tg3_full_lock(tp, 0);
5643 tg3_refclk_write(tp, ns);
5645 tg3_full_unlock(tp);
5650 static int tg3_ptp_enable(struct ptp_clock_info *ptp,
5651 struct ptp_clock_request *rq, int on)
5656 static const struct ptp_clock_info tg3_ptp_caps = {
5657 .owner = THIS_MODULE,
5658 .name = "tg3 clock",
5659 .max_adj = 250000000,
5664 .adjfreq = tg3_ptp_adjfreq,
5665 .adjtime = tg3_ptp_adjtime,
5666 .gettime = tg3_ptp_gettime,
5667 .settime = tg3_ptp_settime,
5668 .enable = tg3_ptp_enable,
5671 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
5672 struct skb_shared_hwtstamps *timestamp)
5674 memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
5675 timestamp->hwtstamp = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
5679 /* tp->lock must be held */
5680 static void tg3_ptp_init(struct tg3 *tp)
5682 if (!tg3_flag(tp, PTP_CAPABLE))
5685 /* Initialize the hardware clock to the system time. */
5686 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
5688 tp->ptp_info = tg3_ptp_caps;
5691 /* tp->lock must be held */
5692 static void tg3_ptp_resume(struct tg3 *tp)
5694 if (!tg3_flag(tp, PTP_CAPABLE))
5697 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
5701 static void tg3_ptp_fini(struct tg3 *tp)
5703 if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
5706 ptp_clock_unregister(tp->ptp_clock);
5707 tp->ptp_clock = NULL;
5711 static inline int tg3_irq_sync(struct tg3 *tp)
5713 return tp->irq_sync;
5716 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
5720 dst = (u32 *)((u8 *)dst + off);
5721 for (i = 0; i < len; i += sizeof(u32))
5722 *dst++ = tr32(off + i);
5725 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
5727 tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
5728 tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
5729 tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
5730 tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
5731 tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
5732 tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
5733 tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
5734 tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
5735 tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
5736 tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
5737 tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
5738 tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
5739 tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
5740 tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
5741 tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
5742 tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
5743 tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
5744 tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
5745 tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
5747 if (tg3_flag(tp, SUPPORT_MSIX))
5748 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
5750 tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
5751 tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
5752 tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
5753 tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
5754 tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
5755 tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
5756 tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
5757 tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
5759 if (!tg3_flag(tp, 5705_PLUS)) {
5760 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
5761 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
5762 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
5765 tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
5766 tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
5767 tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
5768 tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
5769 tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
5771 if (tg3_flag(tp, NVRAM))
5772 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
5775 static void tg3_dump_state(struct tg3 *tp)
5780 regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
5782 netdev_err(tp->dev, "Failed allocating register dump buffer\n");
5786 if (tg3_flag(tp, PCI_EXPRESS)) {
5787 /* Read up to but not including private PCI registers */
5788 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
5789 regs[i / sizeof(u32)] = tr32(i);
5791 tg3_dump_legacy_regs(tp, regs);
5793 for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
5794 if (!regs[i + 0] && !regs[i + 1] &&
5795 !regs[i + 2] && !regs[i + 3])
5798 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
5800 regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
5805 for (i = 0; i < tp->irq_cnt; i++) {
5806 struct tg3_napi *tnapi = &tp->napi[i];
5808 /* SW status block */
5810 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
5812 tnapi->hw_status->status,
5813 tnapi->hw_status->status_tag,
5814 tnapi->hw_status->rx_jumbo_consumer,
5815 tnapi->hw_status->rx_consumer,
5816 tnapi->hw_status->rx_mini_consumer,
5817 tnapi->hw_status->idx[0].rx_producer,
5818 tnapi->hw_status->idx[0].tx_consumer);
5821 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
5823 tnapi->last_tag, tnapi->last_irq_tag,
5824 tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
5826 tnapi->prodring.rx_std_prod_idx,
5827 tnapi->prodring.rx_std_cons_idx,
5828 tnapi->prodring.rx_jmb_prod_idx,
5829 tnapi->prodring.rx_jmb_cons_idx);
5833 /* This is called whenever we suspect that the system chipset is re-
5834 * ordering the sequence of MMIO to the tx send mailbox. The symptom
5835 * is bogus tx completions. We try to recover by setting the
5836 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
5839 static void tg3_tx_recover(struct tg3 *tp)
5841 BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
5842 tp->write32_tx_mbox == tg3_write_indirect_mbox);
5844 netdev_warn(tp->dev,
5845 "The system may be re-ordering memory-mapped I/O "
5846 "cycles to the network device, attempting to recover. "
5847 "Please report the problem to the driver maintainer "
5848 "and include system chipset information.\n");
5850 spin_lock(&tp->lock);
5851 tg3_flag_set(tp, TX_RECOVERY_PENDING);
5852 spin_unlock(&tp->lock);
5855 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
5857 /* Tell compiler to fetch tx indices from memory. */
5859 return tnapi->tx_pending -
5860 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
5863 /* Tigon3 never reports partial packet sends. So we do not
5864 * need special logic to handle SKBs that have not had all
5865 * of their frags sent yet, like SunGEM does.
5867 static void tg3_tx(struct tg3_napi *tnapi)
5869 struct tg3 *tp = tnapi->tp;
5870 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
5871 u32 sw_idx = tnapi->tx_cons;
5872 struct netdev_queue *txq;
5873 int index = tnapi - tp->napi;
5874 unsigned int pkts_compl = 0, bytes_compl = 0;
5876 if (tg3_flag(tp, ENABLE_TSS))
5879 txq = netdev_get_tx_queue(tp->dev, index);
5881 while (sw_idx != hw_idx) {
5882 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
5883 struct sk_buff *skb = ri->skb;
5886 if (unlikely(skb == NULL)) {
5891 if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
5892 struct skb_shared_hwtstamps timestamp;
5893 u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
5894 hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
5896 tg3_hwclock_to_timestamp(tp, hwclock, ×tamp);
5898 skb_tstamp_tx(skb, ×tamp);
5901 pci_unmap_single(tp->pdev,
5902 dma_unmap_addr(ri, mapping),
5908 while (ri->fragmented) {
5909 ri->fragmented = false;
5910 sw_idx = NEXT_TX(sw_idx);
5911 ri = &tnapi->tx_buffers[sw_idx];
5914 sw_idx = NEXT_TX(sw_idx);
5916 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
5917 ri = &tnapi->tx_buffers[sw_idx];
5918 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
5921 pci_unmap_page(tp->pdev,
5922 dma_unmap_addr(ri, mapping),
5923 skb_frag_size(&skb_shinfo(skb)->frags[i]),
5926 while (ri->fragmented) {
5927 ri->fragmented = false;
5928 sw_idx = NEXT_TX(sw_idx);
5929 ri = &tnapi->tx_buffers[sw_idx];
5932 sw_idx = NEXT_TX(sw_idx);
5936 bytes_compl += skb->len;
5940 if (unlikely(tx_bug)) {
5946 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
5948 tnapi->tx_cons = sw_idx;
5950 /* Need to make the tx_cons update visible to tg3_start_xmit()
5951 * before checking for netif_queue_stopped(). Without the
5952 * memory barrier, there is a small possibility that tg3_start_xmit()
5953 * will miss it and cause the queue to be stopped forever.
5957 if (unlikely(netif_tx_queue_stopped(txq) &&
5958 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
5959 __netif_tx_lock(txq, smp_processor_id());
5960 if (netif_tx_queue_stopped(txq) &&
5961 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
5962 netif_tx_wake_queue(txq);
5963 __netif_tx_unlock(txq);
5967 static void tg3_frag_free(bool is_frag, void *data)
5970 put_page(virt_to_head_page(data));
5975 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
5977 unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
5978 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5983 pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
5984 map_sz, PCI_DMA_FROMDEVICE);
5985 tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
5990 /* Returns size of skb allocated or < 0 on error.
5992 * We only need to fill in the address because the other members
5993 * of the RX descriptor are invariant, see tg3_init_rings.
5995 * Note the purposeful assymetry of cpu vs. chip accesses. For
5996 * posting buffers we only dirty the first cache line of the RX
5997 * descriptor (containing the address). Whereas for the RX status
5998 * buffers the cpu only reads the last cacheline of the RX descriptor
5999 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
6001 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
6002 u32 opaque_key, u32 dest_idx_unmasked,
6003 unsigned int *frag_size)
6005 struct tg3_rx_buffer_desc *desc;
6006 struct ring_info *map;
6009 int skb_size, data_size, dest_idx;
6011 switch (opaque_key) {
6012 case RXD_OPAQUE_RING_STD:
6013 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6014 desc = &tpr->rx_std[dest_idx];
6015 map = &tpr->rx_std_buffers[dest_idx];
6016 data_size = tp->rx_pkt_map_sz;
6019 case RXD_OPAQUE_RING_JUMBO:
6020 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6021 desc = &tpr->rx_jmb[dest_idx].std;
6022 map = &tpr->rx_jmb_buffers[dest_idx];
6023 data_size = TG3_RX_JMB_MAP_SZ;
6030 /* Do not overwrite any of the map or rp information
6031 * until we are sure we can commit to a new buffer.
6033 * Callers depend upon this behavior and assume that
6034 * we leave everything unchanged if we fail.
6036 skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
6037 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6038 if (skb_size <= PAGE_SIZE) {
6039 data = netdev_alloc_frag(skb_size);
6040 *frag_size = skb_size;
6042 data = kmalloc(skb_size, GFP_ATOMIC);
6048 mapping = pci_map_single(tp->pdev,
6049 data + TG3_RX_OFFSET(tp),
6051 PCI_DMA_FROMDEVICE);
6052 if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
6053 tg3_frag_free(skb_size <= PAGE_SIZE, data);
6058 dma_unmap_addr_set(map, mapping, mapping);
6060 desc->addr_hi = ((u64)mapping >> 32);
6061 desc->addr_lo = ((u64)mapping & 0xffffffff);
6066 /* We only need to move over in the address because the other
6067 * members of the RX descriptor are invariant. See notes above
6068 * tg3_alloc_rx_data for full details.
6070 static void tg3_recycle_rx(struct tg3_napi *tnapi,
6071 struct tg3_rx_prodring_set *dpr,
6072 u32 opaque_key, int src_idx,
6073 u32 dest_idx_unmasked)
6075 struct tg3 *tp = tnapi->tp;
6076 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
6077 struct ring_info *src_map, *dest_map;
6078 struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
6081 switch (opaque_key) {
6082 case RXD_OPAQUE_RING_STD:
6083 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6084 dest_desc = &dpr->rx_std[dest_idx];
6085 dest_map = &dpr->rx_std_buffers[dest_idx];
6086 src_desc = &spr->rx_std[src_idx];
6087 src_map = &spr->rx_std_buffers[src_idx];
6090 case RXD_OPAQUE_RING_JUMBO:
6091 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6092 dest_desc = &dpr->rx_jmb[dest_idx].std;
6093 dest_map = &dpr->rx_jmb_buffers[dest_idx];
6094 src_desc = &spr->rx_jmb[src_idx].std;
6095 src_map = &spr->rx_jmb_buffers[src_idx];
6102 dest_map->data = src_map->data;
6103 dma_unmap_addr_set(dest_map, mapping,
6104 dma_unmap_addr(src_map, mapping));
6105 dest_desc->addr_hi = src_desc->addr_hi;
6106 dest_desc->addr_lo = src_desc->addr_lo;
6108 /* Ensure that the update to the skb happens after the physical
6109 * addresses have been transferred to the new BD location.
6113 src_map->data = NULL;
6116 /* The RX ring scheme is composed of multiple rings which post fresh
6117 * buffers to the chip, and one special ring the chip uses to report
6118 * status back to the host.
6120 * The special ring reports the status of received packets to the
6121 * host. The chip does not write into the original descriptor the
6122 * RX buffer was obtained from. The chip simply takes the original
6123 * descriptor as provided by the host, updates the status and length
6124 * field, then writes this into the next status ring entry.
6126 * Each ring the host uses to post buffers to the chip is described
6127 * by a TG3_BDINFO entry in the chips SRAM area. When a packet arrives,
6128 * it is first placed into the on-chip ram. When the packet's length
6129 * is known, it walks down the TG3_BDINFO entries to select the ring.
6130 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
6131 * which is within the range of the new packet's length is chosen.
6133 * The "separate ring for rx status" scheme may sound queer, but it makes
6134 * sense from a cache coherency perspective. If only the host writes
6135 * to the buffer post rings, and only the chip writes to the rx status
6136 * rings, then cache lines never move beyond shared-modified state.
6137 * If both the host and chip were to write into the same ring, cache line
6138 * eviction could occur since both entities want it in an exclusive state.
6140 static int tg3_rx(struct tg3_napi *tnapi, int budget)
6142 struct tg3 *tp = tnapi->tp;
6143 u32 work_mask, rx_std_posted = 0;
6144 u32 std_prod_idx, jmb_prod_idx;
6145 u32 sw_idx = tnapi->rx_rcb_ptr;
6148 struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
6150 hw_idx = *(tnapi->rx_rcb_prod_idx);
6152 * We need to order the read of hw_idx and the read of
6153 * the opaque cookie.
6158 std_prod_idx = tpr->rx_std_prod_idx;
6159 jmb_prod_idx = tpr->rx_jmb_prod_idx;
6160 while (sw_idx != hw_idx && budget > 0) {
6161 struct ring_info *ri;
6162 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
6164 struct sk_buff *skb;
6165 dma_addr_t dma_addr;
6166 u32 opaque_key, desc_idx, *post_ptr;
6170 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
6171 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
6172 if (opaque_key == RXD_OPAQUE_RING_STD) {
6173 ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
6174 dma_addr = dma_unmap_addr(ri, mapping);
6176 post_ptr = &std_prod_idx;
6178 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
6179 ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
6180 dma_addr = dma_unmap_addr(ri, mapping);
6182 post_ptr = &jmb_prod_idx;
6184 goto next_pkt_nopost;
6186 work_mask |= opaque_key;
6188 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
6189 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
6191 tg3_recycle_rx(tnapi, tpr, opaque_key,
6192 desc_idx, *post_ptr);
6194 /* Other statistics kept track of by card. */
6199 prefetch(data + TG3_RX_OFFSET(tp));
6200 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
6203 if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6204 RXD_FLAG_PTPSTAT_PTPV1 ||
6205 (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6206 RXD_FLAG_PTPSTAT_PTPV2) {
6207 tstamp = tr32(TG3_RX_TSTAMP_LSB);
6208 tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
6211 if (len > TG3_RX_COPY_THRESH(tp)) {
6213 unsigned int frag_size;
6215 skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
6216 *post_ptr, &frag_size);
6220 pci_unmap_single(tp->pdev, dma_addr, skb_size,
6221 PCI_DMA_FROMDEVICE);
6223 skb = build_skb(data, frag_size);
6225 tg3_frag_free(frag_size != 0, data);
6226 goto drop_it_no_recycle;
6228 skb_reserve(skb, TG3_RX_OFFSET(tp));
6229 /* Ensure that the update to the data happens
6230 * after the usage of the old DMA mapping.
6237 tg3_recycle_rx(tnapi, tpr, opaque_key,
6238 desc_idx, *post_ptr);
6240 skb = netdev_alloc_skb(tp->dev,
6241 len + TG3_RAW_IP_ALIGN);
6243 goto drop_it_no_recycle;
6245 skb_reserve(skb, TG3_RAW_IP_ALIGN);
6246 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6248 data + TG3_RX_OFFSET(tp),
6250 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6255 tg3_hwclock_to_timestamp(tp, tstamp,
6256 skb_hwtstamps(skb));
6258 if ((tp->dev->features & NETIF_F_RXCSUM) &&
6259 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6260 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6261 >> RXD_TCPCSUM_SHIFT) == 0xffff))
6262 skb->ip_summed = CHECKSUM_UNNECESSARY;
6264 skb_checksum_none_assert(skb);
6266 skb->protocol = eth_type_trans(skb, tp->dev);
6268 if (len > (tp->dev->mtu + ETH_HLEN) &&
6269 skb->protocol != htons(ETH_P_8021Q)) {
6271 goto drop_it_no_recycle;
6274 if (desc->type_flags & RXD_FLAG_VLAN &&
6275 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6276 __vlan_hwaccel_put_tag(skb,
6277 desc->err_vlan & RXD_VLAN_MASK);
6279 napi_gro_receive(&tnapi->napi, skb);
6287 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6288 tpr->rx_std_prod_idx = std_prod_idx &
6289 tp->rx_std_ring_mask;
6290 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6291 tpr->rx_std_prod_idx);
6292 work_mask &= ~RXD_OPAQUE_RING_STD;
6297 sw_idx &= tp->rx_ret_ring_mask;
6299 /* Refresh hw_idx to see if there is new work */
6300 if (sw_idx == hw_idx) {
6301 hw_idx = *(tnapi->rx_rcb_prod_idx);
6306 /* ACK the status ring. */
6307 tnapi->rx_rcb_ptr = sw_idx;
6308 tw32_rx_mbox(tnapi->consmbox, sw_idx);
6310 /* Refill RX ring(s). */
6311 if (!tg3_flag(tp, ENABLE_RSS)) {
6312 /* Sync BD data before updating mailbox */
6315 if (work_mask & RXD_OPAQUE_RING_STD) {
6316 tpr->rx_std_prod_idx = std_prod_idx &
6317 tp->rx_std_ring_mask;
6318 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6319 tpr->rx_std_prod_idx);
6321 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
6322 tpr->rx_jmb_prod_idx = jmb_prod_idx &
6323 tp->rx_jmb_ring_mask;
6324 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6325 tpr->rx_jmb_prod_idx);
6328 } else if (work_mask) {
6329 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
6330 * updated before the producer indices can be updated.
6334 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
6335 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
6337 if (tnapi != &tp->napi[1]) {
6338 tp->rx_refill = true;
6339 napi_schedule(&tp->napi[1].napi);
6346 static void tg3_poll_link(struct tg3 *tp)
6348 /* handle link change and other phy events */
6349 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
6350 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
6352 if (sblk->status & SD_STATUS_LINK_CHG) {
6353 sblk->status = SD_STATUS_UPDATED |
6354 (sblk->status & ~SD_STATUS_LINK_CHG);
6355 spin_lock(&tp->lock);
6356 if (tg3_flag(tp, USE_PHYLIB)) {
6358 (MAC_STATUS_SYNC_CHANGED |
6359 MAC_STATUS_CFG_CHANGED |
6360 MAC_STATUS_MI_COMPLETION |
6361 MAC_STATUS_LNKSTATE_CHANGED));
6364 tg3_setup_phy(tp, 0);
6365 spin_unlock(&tp->lock);
6370 static int tg3_rx_prodring_xfer(struct tg3 *tp,
6371 struct tg3_rx_prodring_set *dpr,
6372 struct tg3_rx_prodring_set *spr)
6374 u32 si, di, cpycnt, src_prod_idx;
6378 src_prod_idx = spr->rx_std_prod_idx;
6380 /* Make sure updates to the rx_std_buffers[] entries and the
6381 * standard producer index are seen in the correct order.
6385 if (spr->rx_std_cons_idx == src_prod_idx)
6388 if (spr->rx_std_cons_idx < src_prod_idx)
6389 cpycnt = src_prod_idx - spr->rx_std_cons_idx;
6391 cpycnt = tp->rx_std_ring_mask + 1 -
6392 spr->rx_std_cons_idx;
6394 cpycnt = min(cpycnt,
6395 tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
6397 si = spr->rx_std_cons_idx;
6398 di = dpr->rx_std_prod_idx;
6400 for (i = di; i < di + cpycnt; i++) {
6401 if (dpr->rx_std_buffers[i].data) {
6411 /* Ensure that updates to the rx_std_buffers ring and the
6412 * shadowed hardware producer ring from tg3_recycle_skb() are
6413 * ordered correctly WRT the skb check above.
6417 memcpy(&dpr->rx_std_buffers[di],
6418 &spr->rx_std_buffers[si],
6419 cpycnt * sizeof(struct ring_info));
6421 for (i = 0; i < cpycnt; i++, di++, si++) {
6422 struct tg3_rx_buffer_desc *sbd, *dbd;
6423 sbd = &spr->rx_std[si];
6424 dbd = &dpr->rx_std[di];
6425 dbd->addr_hi = sbd->addr_hi;
6426 dbd->addr_lo = sbd->addr_lo;
6429 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
6430 tp->rx_std_ring_mask;
6431 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
6432 tp->rx_std_ring_mask;
6436 src_prod_idx = spr->rx_jmb_prod_idx;
6438 /* Make sure updates to the rx_jmb_buffers[] entries and
6439 * the jumbo producer index are seen in the correct order.
6443 if (spr->rx_jmb_cons_idx == src_prod_idx)
6446 if (spr->rx_jmb_cons_idx < src_prod_idx)
6447 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
6449 cpycnt = tp->rx_jmb_ring_mask + 1 -
6450 spr->rx_jmb_cons_idx;
6452 cpycnt = min(cpycnt,
6453 tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
6455 si = spr->rx_jmb_cons_idx;
6456 di = dpr->rx_jmb_prod_idx;
6458 for (i = di; i < di + cpycnt; i++) {
6459 if (dpr->rx_jmb_buffers[i].data) {
6469 /* Ensure that updates to the rx_jmb_buffers ring and the
6470 * shadowed hardware producer ring from tg3_recycle_skb() are
6471 * ordered correctly WRT the skb check above.
6475 memcpy(&dpr->rx_jmb_buffers[di],
6476 &spr->rx_jmb_buffers[si],
6477 cpycnt * sizeof(struct ring_info));
6479 for (i = 0; i < cpycnt; i++, di++, si++) {
6480 struct tg3_rx_buffer_desc *sbd, *dbd;
6481 sbd = &spr->rx_jmb[si].std;
6482 dbd = &dpr->rx_jmb[di].std;
6483 dbd->addr_hi = sbd->addr_hi;
6484 dbd->addr_lo = sbd->addr_lo;
6487 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
6488 tp->rx_jmb_ring_mask;
6489 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
6490 tp->rx_jmb_ring_mask;
6496 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
6498 struct tg3 *tp = tnapi->tp;
6500 /* run TX completion thread */
6501 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
6503 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6507 if (!tnapi->rx_rcb_prod_idx)
6510 /* run RX thread, within the bounds set by NAPI.
6511 * All RX "locking" is done by ensuring outside
6512 * code synchronizes with tg3->napi.poll()
6514 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
6515 work_done += tg3_rx(tnapi, budget - work_done);
6517 if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
6518 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
6520 u32 std_prod_idx = dpr->rx_std_prod_idx;
6521 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
6523 tp->rx_refill = false;
6524 for (i = 1; i <= tp->rxq_cnt; i++)
6525 err |= tg3_rx_prodring_xfer(tp, dpr,
6526 &tp->napi[i].prodring);
6530 if (std_prod_idx != dpr->rx_std_prod_idx)
6531 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6532 dpr->rx_std_prod_idx);
6534 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
6535 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6536 dpr->rx_jmb_prod_idx);
6541 tw32_f(HOSTCC_MODE, tp->coal_now);
6547 static inline void tg3_reset_task_schedule(struct tg3 *tp)
6549 if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
6550 schedule_work(&tp->reset_task);
6553 static inline void tg3_reset_task_cancel(struct tg3 *tp)
6555 cancel_work_sync(&tp->reset_task);
6556 tg3_flag_clear(tp, RESET_TASK_PENDING);
6557 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
6560 static int tg3_poll_msix(struct napi_struct *napi, int budget)
6562 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6563 struct tg3 *tp = tnapi->tp;
6565 struct tg3_hw_status *sblk = tnapi->hw_status;
6568 work_done = tg3_poll_work(tnapi, work_done, budget);
6570 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6573 if (unlikely(work_done >= budget))
6576 /* tp->last_tag is used in tg3_int_reenable() below
6577 * to tell the hw how much work has been processed,
6578 * so we must read it before checking for more work.
6580 tnapi->last_tag = sblk->status_tag;
6581 tnapi->last_irq_tag = tnapi->last_tag;
6584 /* check for RX/TX work to do */
6585 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
6586 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
6588 /* This test here is not race free, but will reduce
6589 * the number of interrupts by looping again.
6591 if (tnapi == &tp->napi[1] && tp->rx_refill)
6594 napi_complete(napi);
6595 /* Reenable interrupts. */
6596 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
6598 /* This test here is synchronized by napi_schedule()
6599 * and napi_complete() to close the race condition.
6601 if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
6602 tw32(HOSTCC_MODE, tp->coalesce_mode |
6603 HOSTCC_MODE_ENABLE |
6614 /* work_done is guaranteed to be less than budget. */
6615 napi_complete(napi);
6616 tg3_reset_task_schedule(tp);
6620 static void tg3_process_error(struct tg3 *tp)
6623 bool real_error = false;
6625 if (tg3_flag(tp, ERROR_PROCESSED))
6628 /* Check Flow Attention register */
6629 val = tr32(HOSTCC_FLOW_ATTN);
6630 if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
6631 netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
6635 if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
6636 netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
6640 if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
6641 netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
6650 tg3_flag_set(tp, ERROR_PROCESSED);
6651 tg3_reset_task_schedule(tp);
6654 static int tg3_poll(struct napi_struct *napi, int budget)
6656 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6657 struct tg3 *tp = tnapi->tp;
6659 struct tg3_hw_status *sblk = tnapi->hw_status;
6662 if (sblk->status & SD_STATUS_ERROR)
6663 tg3_process_error(tp);
6667 work_done = tg3_poll_work(tnapi, work_done, budget);
6669 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6672 if (unlikely(work_done >= budget))
6675 if (tg3_flag(tp, TAGGED_STATUS)) {
6676 /* tp->last_tag is used in tg3_int_reenable() below
6677 * to tell the hw how much work has been processed,
6678 * so we must read it before checking for more work.
6680 tnapi->last_tag = sblk->status_tag;
6681 tnapi->last_irq_tag = tnapi->last_tag;
6684 sblk->status &= ~SD_STATUS_UPDATED;
6686 if (likely(!tg3_has_work(tnapi))) {
6687 napi_complete(napi);
6688 tg3_int_reenable(tnapi);
6696 /* work_done is guaranteed to be less than budget. */
6697 napi_complete(napi);
6698 tg3_reset_task_schedule(tp);
6702 static void tg3_napi_disable(struct tg3 *tp)
6706 for (i = tp->irq_cnt - 1; i >= 0; i--)
6707 napi_disable(&tp->napi[i].napi);
6710 static void tg3_napi_enable(struct tg3 *tp)
6714 for (i = 0; i < tp->irq_cnt; i++)
6715 napi_enable(&tp->napi[i].napi);
6718 static void tg3_napi_init(struct tg3 *tp)
6722 netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
6723 for (i = 1; i < tp->irq_cnt; i++)
6724 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
6727 static void tg3_napi_fini(struct tg3 *tp)
6731 for (i = 0; i < tp->irq_cnt; i++)
6732 netif_napi_del(&tp->napi[i].napi);
6735 static inline void tg3_netif_stop(struct tg3 *tp)
6737 tp->dev->trans_start = jiffies; /* prevent tx timeout */
6738 tg3_napi_disable(tp);
6739 netif_carrier_off(tp->dev);
6740 netif_tx_disable(tp->dev);
6743 /* tp->lock must be held */
6744 static inline void tg3_netif_start(struct tg3 *tp)
6748 /* NOTE: unconditional netif_tx_wake_all_queues is only
6749 * appropriate so long as all callers are assured to
6750 * have free tx slots (such as after tg3_init_hw)
6752 netif_tx_wake_all_queues(tp->dev);
6755 netif_carrier_on(tp->dev);
6757 tg3_napi_enable(tp);
6758 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
6759 tg3_enable_ints(tp);
6762 static void tg3_irq_quiesce(struct tg3 *tp)
6766 BUG_ON(tp->irq_sync);
6771 for (i = 0; i < tp->irq_cnt; i++)
6772 synchronize_irq(tp->napi[i].irq_vec);
6775 /* Fully shutdown all tg3 driver activity elsewhere in the system.
6776 * If irq_sync is non-zero, then the IRQ handler must be synchronized
6777 * with as well. Most of the time, this is not necessary except when
6778 * shutting down the device.
6780 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
6782 spin_lock_bh(&tp->lock);
6784 tg3_irq_quiesce(tp);
6787 static inline void tg3_full_unlock(struct tg3 *tp)
6789 spin_unlock_bh(&tp->lock);
6792 /* One-shot MSI handler - Chip automatically disables interrupt
6793 * after sending MSI so driver doesn't have to do it.
6795 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
6797 struct tg3_napi *tnapi = dev_id;
6798 struct tg3 *tp = tnapi->tp;
6800 prefetch(tnapi->hw_status);
6802 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6804 if (likely(!tg3_irq_sync(tp)))
6805 napi_schedule(&tnapi->napi);
6810 /* MSI ISR - No need to check for interrupt sharing and no need to
6811 * flush status block and interrupt mailbox. PCI ordering rules
6812 * guarantee that MSI will arrive after the status block.
6814 static irqreturn_t tg3_msi(int irq, void *dev_id)
6816 struct tg3_napi *tnapi = dev_id;
6817 struct tg3 *tp = tnapi->tp;
6819 prefetch(tnapi->hw_status);
6821 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6823 * Writing any value to intr-mbox-0 clears PCI INTA# and
6824 * chip-internal interrupt pending events.
6825 * Writing non-zero to intr-mbox-0 additional tells the
6826 * NIC to stop sending us irqs, engaging "in-intr-handler"
6829 tw32_mailbox(tnapi->int_mbox, 0x00000001);
6830 if (likely(!tg3_irq_sync(tp)))
6831 napi_schedule(&tnapi->napi);
6833 return IRQ_RETVAL(1);
6836 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
6838 struct tg3_napi *tnapi = dev_id;
6839 struct tg3 *tp = tnapi->tp;
6840 struct tg3_hw_status *sblk = tnapi->hw_status;
6841 unsigned int handled = 1;
6843 /* In INTx mode, it is possible for the interrupt to arrive at
6844 * the CPU before the status block posted prior to the interrupt.
6845 * Reading the PCI State register will confirm whether the
6846 * interrupt is ours and will flush the status block.
6848 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
6849 if (tg3_flag(tp, CHIP_RESETTING) ||
6850 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6857 * Writing any value to intr-mbox-0 clears PCI INTA# and
6858 * chip-internal interrupt pending events.
6859 * Writing non-zero to intr-mbox-0 additional tells the
6860 * NIC to stop sending us irqs, engaging "in-intr-handler"
6863 * Flush the mailbox to de-assert the IRQ immediately to prevent
6864 * spurious interrupts. The flush impacts performance but
6865 * excessive spurious interrupts can be worse in some cases.
6867 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
6868 if (tg3_irq_sync(tp))
6870 sblk->status &= ~SD_STATUS_UPDATED;
6871 if (likely(tg3_has_work(tnapi))) {
6872 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6873 napi_schedule(&tnapi->napi);
6875 /* No work, shared interrupt perhaps? re-enable
6876 * interrupts, and flush that PCI write
6878 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
6882 return IRQ_RETVAL(handled);
6885 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
6887 struct tg3_napi *tnapi = dev_id;
6888 struct tg3 *tp = tnapi->tp;
6889 struct tg3_hw_status *sblk = tnapi->hw_status;
6890 unsigned int handled = 1;
6892 /* In INTx mode, it is possible for the interrupt to arrive at
6893 * the CPU before the status block posted prior to the interrupt.
6894 * Reading the PCI State register will confirm whether the
6895 * interrupt is ours and will flush the status block.
6897 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
6898 if (tg3_flag(tp, CHIP_RESETTING) ||
6899 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6906 * writing any value to intr-mbox-0 clears PCI INTA# and
6907 * chip-internal interrupt pending events.
6908 * writing non-zero to intr-mbox-0 additional tells the
6909 * NIC to stop sending us irqs, engaging "in-intr-handler"
6912 * Flush the mailbox to de-assert the IRQ immediately to prevent
6913 * spurious interrupts. The flush impacts performance but
6914 * excessive spurious interrupts can be worse in some cases.
6916 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
6919 * In a shared interrupt configuration, sometimes other devices'
6920 * interrupts will scream. We record the current status tag here
6921 * so that the above check can report that the screaming interrupts
6922 * are unhandled. Eventually they will be silenced.
6924 tnapi->last_irq_tag = sblk->status_tag;
6926 if (tg3_irq_sync(tp))
6929 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6931 napi_schedule(&tnapi->napi);
6934 return IRQ_RETVAL(handled);
6937 /* ISR for interrupt test */
6938 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
6940 struct tg3_napi *tnapi = dev_id;
6941 struct tg3 *tp = tnapi->tp;
6942 struct tg3_hw_status *sblk = tnapi->hw_status;
6944 if ((sblk->status & SD_STATUS_UPDATED) ||
6945 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6946 tg3_disable_ints(tp);
6947 return IRQ_RETVAL(1);
6949 return IRQ_RETVAL(0);
6952 #ifdef CONFIG_NET_POLL_CONTROLLER
6953 static void tg3_poll_controller(struct net_device *dev)
6956 struct tg3 *tp = netdev_priv(dev);
6958 for (i = 0; i < tp->irq_cnt; i++)
6959 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
6963 static void tg3_tx_timeout(struct net_device *dev)
6965 struct tg3 *tp = netdev_priv(dev);
6967 if (netif_msg_tx_err(tp)) {
6968 netdev_err(dev, "transmit timed out, resetting\n");
6972 tg3_reset_task_schedule(tp);
6975 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
6976 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
6978 u32 base = (u32) mapping & 0xffffffff;
6980 return (base > 0xffffdcc0) && (base + len + 8 < base);
6983 /* Test for DMA addresses > 40-bit */
6984 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
6987 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
6988 if (tg3_flag(tp, 40BIT_DMA_BUG))
6989 return ((u64) mapping + len) > DMA_BIT_MASK(40);
6996 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
6997 dma_addr_t mapping, u32 len, u32 flags,
7000 txbd->addr_hi = ((u64) mapping >> 32);
7001 txbd->addr_lo = ((u64) mapping & 0xffffffff);
7002 txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7003 txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
7006 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
7007 dma_addr_t map, u32 len, u32 flags,
7010 struct tg3 *tp = tnapi->tp;
7013 if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
7016 if (tg3_4g_overflow_test(map, len))
7019 if (tg3_40bit_overflow_test(tp, map, len))
7022 if (tp->dma_limit) {
7023 u32 prvidx = *entry;
7024 u32 tmp_flag = flags & ~TXD_FLAG_END;
7025 while (len > tp->dma_limit && *budget) {
7026 u32 frag_len = tp->dma_limit;
7027 len -= tp->dma_limit;
7029 /* Avoid the 8byte DMA problem */
7031 len += tp->dma_limit / 2;
7032 frag_len = tp->dma_limit / 2;
7035 tnapi->tx_buffers[*entry].fragmented = true;
7037 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7038 frag_len, tmp_flag, mss, vlan);
7041 *entry = NEXT_TX(*entry);
7048 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7049 len, flags, mss, vlan);
7051 *entry = NEXT_TX(*entry);
7054 tnapi->tx_buffers[prvidx].fragmented = false;
7058 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7059 len, flags, mss, vlan);
7060 *entry = NEXT_TX(*entry);
7066 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
7069 struct sk_buff *skb;
7070 struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
7075 pci_unmap_single(tnapi->tp->pdev,
7076 dma_unmap_addr(txb, mapping),
7080 while (txb->fragmented) {
7081 txb->fragmented = false;
7082 entry = NEXT_TX(entry);
7083 txb = &tnapi->tx_buffers[entry];
7086 for (i = 0; i <= last; i++) {
7087 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7089 entry = NEXT_TX(entry);
7090 txb = &tnapi->tx_buffers[entry];
7092 pci_unmap_page(tnapi->tp->pdev,
7093 dma_unmap_addr(txb, mapping),
7094 skb_frag_size(frag), PCI_DMA_TODEVICE);
7096 while (txb->fragmented) {
7097 txb->fragmented = false;
7098 entry = NEXT_TX(entry);
7099 txb = &tnapi->tx_buffers[entry];
7104 /* Workaround 4GB and 40-bit hardware DMA bugs. */
7105 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
7106 struct sk_buff **pskb,
7107 u32 *entry, u32 *budget,
7108 u32 base_flags, u32 mss, u32 vlan)
7110 struct tg3 *tp = tnapi->tp;
7111 struct sk_buff *new_skb, *skb = *pskb;
7112 dma_addr_t new_addr = 0;
7115 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
7116 new_skb = skb_copy(skb, GFP_ATOMIC);
7118 int more_headroom = 4 - ((unsigned long)skb->data & 3);
7120 new_skb = skb_copy_expand(skb,
7121 skb_headroom(skb) + more_headroom,
7122 skb_tailroom(skb), GFP_ATOMIC);
7128 /* New SKB is guaranteed to be linear. */
7129 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
7131 /* Make sure the mapping succeeded */
7132 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
7133 dev_kfree_skb(new_skb);
7136 u32 save_entry = *entry;
7138 base_flags |= TXD_FLAG_END;
7140 tnapi->tx_buffers[*entry].skb = new_skb;
7141 dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
7144 if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
7145 new_skb->len, base_flags,
7147 tg3_tx_skb_unmap(tnapi, save_entry, -1);
7148 dev_kfree_skb(new_skb);
7159 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
7161 /* Use GSO to workaround a rare TSO bug that may be triggered when the
7162 * TSO header is greater than 80 bytes.
7164 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
7166 struct sk_buff *segs, *nskb;
7167 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
7169 /* Estimate the number of fragments in the worst case */
7170 if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
7171 netif_stop_queue(tp->dev);
7173 /* netif_tx_stop_queue() must be done before checking
7174 * checking tx index in tg3_tx_avail() below, because in
7175 * tg3_tx(), we update tx index before checking for
7176 * netif_tx_queue_stopped().
7179 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
7180 return NETDEV_TX_BUSY;
7182 netif_wake_queue(tp->dev);
7185 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
7187 goto tg3_tso_bug_end;
7193 tg3_start_xmit(nskb, tp->dev);
7199 return NETDEV_TX_OK;
7202 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
7203 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
7205 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7207 struct tg3 *tp = netdev_priv(dev);
7208 u32 len, entry, base_flags, mss, vlan = 0;
7210 int i = -1, would_hit_hwbug;
7212 struct tg3_napi *tnapi;
7213 struct netdev_queue *txq;
7216 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7217 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
7218 if (tg3_flag(tp, ENABLE_TSS))
7221 budget = tg3_tx_avail(tnapi);
7223 /* We are running in BH disabled context with netif_tx_lock
7224 * and TX reclaim runs via tp->napi.poll inside of a software
7225 * interrupt. Furthermore, IRQ processing runs lockless so we have
7226 * no IRQ context deadlocks to worry about either. Rejoice!
7228 if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
7229 if (!netif_tx_queue_stopped(txq)) {
7230 netif_tx_stop_queue(txq);
7232 /* This is a hard error, log it. */
7234 "BUG! Tx Ring full when queue awake!\n");
7236 return NETDEV_TX_BUSY;
7239 entry = tnapi->tx_prod;
7241 if (skb->ip_summed == CHECKSUM_PARTIAL)
7242 base_flags |= TXD_FLAG_TCPUDP_CSUM;
7244 mss = skb_shinfo(skb)->gso_size;
7247 u32 tcp_opt_len, hdr_len;
7249 if (skb_header_cloned(skb) &&
7250 pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
7254 tcp_opt_len = tcp_optlen(skb);
7256 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
7258 if (!skb_is_gso_v6(skb)) {
7260 iph->tot_len = htons(mss + hdr_len);
7263 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7264 tg3_flag(tp, TSO_BUG))
7265 return tg3_tso_bug(tp, skb);
7267 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7268 TXD_FLAG_CPU_POST_DMA);
7270 if (tg3_flag(tp, HW_TSO_1) ||
7271 tg3_flag(tp, HW_TSO_2) ||
7272 tg3_flag(tp, HW_TSO_3)) {
7273 tcp_hdr(skb)->check = 0;
7274 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
7276 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
7281 if (tg3_flag(tp, HW_TSO_3)) {
7282 mss |= (hdr_len & 0xc) << 12;
7284 base_flags |= 0x00000010;
7285 base_flags |= (hdr_len & 0x3e0) << 5;
7286 } else if (tg3_flag(tp, HW_TSO_2))
7287 mss |= hdr_len << 9;
7288 else if (tg3_flag(tp, HW_TSO_1) ||
7289 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7290 if (tcp_opt_len || iph->ihl > 5) {
7293 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7294 mss |= (tsflags << 11);
7297 if (tcp_opt_len || iph->ihl > 5) {
7300 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7301 base_flags |= tsflags << 12;
7306 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
7307 !mss && skb->len > VLAN_ETH_FRAME_LEN)
7308 base_flags |= TXD_FLAG_JMB_PKT;
7310 if (vlan_tx_tag_present(skb)) {
7311 base_flags |= TXD_FLAG_VLAN;
7312 vlan = vlan_tx_tag_get(skb);
7315 if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
7316 tg3_flag(tp, TX_TSTAMP_EN)) {
7317 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
7318 base_flags |= TXD_FLAG_HWTSTAMP;
7321 len = skb_headlen(skb);
7323 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
7324 if (pci_dma_mapping_error(tp->pdev, mapping))
7328 tnapi->tx_buffers[entry].skb = skb;
7329 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
7331 would_hit_hwbug = 0;
7333 if (tg3_flag(tp, 5701_DMA_BUG))
7334 would_hit_hwbug = 1;
7336 if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
7337 ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
7339 would_hit_hwbug = 1;
7340 } else if (skb_shinfo(skb)->nr_frags > 0) {
7343 if (!tg3_flag(tp, HW_TSO_1) &&
7344 !tg3_flag(tp, HW_TSO_2) &&
7345 !tg3_flag(tp, HW_TSO_3))
7348 /* Now loop through additional data
7349 * fragments, and queue them.
7351 last = skb_shinfo(skb)->nr_frags - 1;
7352 for (i = 0; i <= last; i++) {
7353 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7355 len = skb_frag_size(frag);
7356 mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
7357 len, DMA_TO_DEVICE);
7359 tnapi->tx_buffers[entry].skb = NULL;
7360 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
7362 if (dma_mapping_error(&tp->pdev->dev, mapping))
7366 tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
7368 ((i == last) ? TXD_FLAG_END : 0),
7370 would_hit_hwbug = 1;
7376 if (would_hit_hwbug) {
7377 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
7379 /* If the workaround fails due to memory/mapping
7380 * failure, silently drop this packet.
7382 entry = tnapi->tx_prod;
7383 budget = tg3_tx_avail(tnapi);
7384 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
7385 base_flags, mss, vlan))
7389 skb_tx_timestamp(skb);
7390 netdev_tx_sent_queue(txq, skb->len);
7392 /* Sync BD data before updating mailbox */
7395 /* Packets are ready, update Tx producer idx local and on card. */
7396 tw32_tx_mbox(tnapi->prodmbox, entry);
7398 tnapi->tx_prod = entry;
7399 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
7400 netif_tx_stop_queue(txq);
7402 /* netif_tx_stop_queue() must be done before checking
7403 * checking tx index in tg3_tx_avail() below, because in
7404 * tg3_tx(), we update tx index before checking for
7405 * netif_tx_queue_stopped().
7408 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
7409 netif_tx_wake_queue(txq);
7413 return NETDEV_TX_OK;
7416 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
7417 tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
7422 return NETDEV_TX_OK;
7425 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
7428 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
7429 MAC_MODE_PORT_MODE_MASK);
7431 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
7433 if (!tg3_flag(tp, 5705_PLUS))
7434 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
7436 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
7437 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
7439 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
7441 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
7443 if (tg3_flag(tp, 5705_PLUS) ||
7444 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
7445 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
7446 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
7449 tw32(MAC_MODE, tp->mac_mode);
7453 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
7455 u32 val, bmcr, mac_mode, ptest = 0;
7457 tg3_phy_toggle_apd(tp, false);
7458 tg3_phy_toggle_automdix(tp, 0);
7460 if (extlpbk && tg3_phy_set_extloopbk(tp))
7463 bmcr = BMCR_FULLDPLX;
7468 bmcr |= BMCR_SPEED100;
7472 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
7474 bmcr |= BMCR_SPEED100;
7477 bmcr |= BMCR_SPEED1000;
7482 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
7483 tg3_readphy(tp, MII_CTRL1000, &val);
7484 val |= CTL1000_AS_MASTER |
7485 CTL1000_ENABLE_MASTER;
7486 tg3_writephy(tp, MII_CTRL1000, val);
7488 ptest = MII_TG3_FET_PTEST_TRIM_SEL |
7489 MII_TG3_FET_PTEST_TRIM_2;
7490 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
7493 bmcr |= BMCR_LOOPBACK;
7495 tg3_writephy(tp, MII_BMCR, bmcr);
7497 /* The write needs to be flushed for the FETs */
7498 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
7499 tg3_readphy(tp, MII_BMCR, &bmcr);
7503 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
7504 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
7505 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
7506 MII_TG3_FET_PTEST_FRC_TX_LINK |
7507 MII_TG3_FET_PTEST_FRC_TX_LOCK);
7509 /* The write needs to be flushed for the AC131 */
7510 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
7513 /* Reset to prevent losing 1st rx packet intermittently */
7514 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
7515 tg3_flag(tp, 5780_CLASS)) {
7516 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7518 tw32_f(MAC_RX_MODE, tp->rx_mode);
7521 mac_mode = tp->mac_mode &
7522 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
7523 if (speed == SPEED_1000)
7524 mac_mode |= MAC_MODE_PORT_MODE_GMII;
7526 mac_mode |= MAC_MODE_PORT_MODE_MII;
7528 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
7529 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
7531 if (masked_phy_id == TG3_PHY_ID_BCM5401)
7532 mac_mode &= ~MAC_MODE_LINK_POLARITY;
7533 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
7534 mac_mode |= MAC_MODE_LINK_POLARITY;
7536 tg3_writephy(tp, MII_TG3_EXT_CTRL,
7537 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
7540 tw32(MAC_MODE, mac_mode);
7546 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
7548 struct tg3 *tp = netdev_priv(dev);
7550 if (features & NETIF_F_LOOPBACK) {
7551 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
7554 spin_lock_bh(&tp->lock);
7555 tg3_mac_loopback(tp, true);
7556 netif_carrier_on(tp->dev);
7557 spin_unlock_bh(&tp->lock);
7558 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
7560 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
7563 spin_lock_bh(&tp->lock);
7564 tg3_mac_loopback(tp, false);
7565 /* Force link status check */
7566 tg3_setup_phy(tp, 1);
7567 spin_unlock_bh(&tp->lock);
7568 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
7572 static netdev_features_t tg3_fix_features(struct net_device *dev,
7573 netdev_features_t features)
7575 struct tg3 *tp = netdev_priv(dev);
7577 if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
7578 features &= ~NETIF_F_ALL_TSO;
7583 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
7585 netdev_features_t changed = dev->features ^ features;
7587 if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
7588 tg3_set_loopback(dev, features);
7593 static void tg3_rx_prodring_free(struct tg3 *tp,
7594 struct tg3_rx_prodring_set *tpr)
7598 if (tpr != &tp->napi[0].prodring) {
7599 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
7600 i = (i + 1) & tp->rx_std_ring_mask)
7601 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
7604 if (tg3_flag(tp, JUMBO_CAPABLE)) {
7605 for (i = tpr->rx_jmb_cons_idx;
7606 i != tpr->rx_jmb_prod_idx;
7607 i = (i + 1) & tp->rx_jmb_ring_mask) {
7608 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
7616 for (i = 0; i <= tp->rx_std_ring_mask; i++)
7617 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
7620 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7621 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
7622 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
7627 /* Initialize rx rings for packet processing.
7629 * The chip has been shut down and the driver detached from
7630 * the networking, so no interrupts or new tx packets will
7631 * end up in the driver. tp->{tx,}lock are held and thus
7634 static int tg3_rx_prodring_alloc(struct tg3 *tp,
7635 struct tg3_rx_prodring_set *tpr)
7637 u32 i, rx_pkt_dma_sz;
7639 tpr->rx_std_cons_idx = 0;
7640 tpr->rx_std_prod_idx = 0;
7641 tpr->rx_jmb_cons_idx = 0;
7642 tpr->rx_jmb_prod_idx = 0;
7644 if (tpr != &tp->napi[0].prodring) {
7645 memset(&tpr->rx_std_buffers[0], 0,
7646 TG3_RX_STD_BUFF_RING_SIZE(tp));
7647 if (tpr->rx_jmb_buffers)
7648 memset(&tpr->rx_jmb_buffers[0], 0,
7649 TG3_RX_JMB_BUFF_RING_SIZE(tp));
7653 /* Zero out all descriptors. */
7654 memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
7656 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
7657 if (tg3_flag(tp, 5780_CLASS) &&
7658 tp->dev->mtu > ETH_DATA_LEN)
7659 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
7660 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
7662 /* Initialize invariants of the rings, we only set this
7663 * stuff once. This works because the card does not
7664 * write into the rx buffer posting rings.
7666 for (i = 0; i <= tp->rx_std_ring_mask; i++) {
7667 struct tg3_rx_buffer_desc *rxd;
7669 rxd = &tpr->rx_std[i];
7670 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
7671 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
7672 rxd->opaque = (RXD_OPAQUE_RING_STD |
7673 (i << RXD_OPAQUE_INDEX_SHIFT));
7676 /* Now allocate fresh SKBs for each rx ring. */
7677 for (i = 0; i < tp->rx_pending; i++) {
7678 unsigned int frag_size;
7680 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
7682 netdev_warn(tp->dev,
7683 "Using a smaller RX standard ring. Only "
7684 "%d out of %d buffers were allocated "
7685 "successfully\n", i, tp->rx_pending);
7693 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
7696 memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
7698 if (!tg3_flag(tp, JUMBO_RING_ENABLE))
7701 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
7702 struct tg3_rx_buffer_desc *rxd;
7704 rxd = &tpr->rx_jmb[i].std;
7705 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
7706 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
7708 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
7709 (i << RXD_OPAQUE_INDEX_SHIFT));
7712 for (i = 0; i < tp->rx_jumbo_pending; i++) {
7713 unsigned int frag_size;
7715 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
7717 netdev_warn(tp->dev,
7718 "Using a smaller RX jumbo ring. Only %d "
7719 "out of %d buffers were allocated "
7720 "successfully\n", i, tp->rx_jumbo_pending);
7723 tp->rx_jumbo_pending = i;
7732 tg3_rx_prodring_free(tp, tpr);
7736 static void tg3_rx_prodring_fini(struct tg3 *tp,
7737 struct tg3_rx_prodring_set *tpr)
7739 kfree(tpr->rx_std_buffers);
7740 tpr->rx_std_buffers = NULL;
7741 kfree(tpr->rx_jmb_buffers);
7742 tpr->rx_jmb_buffers = NULL;
7744 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
7745 tpr->rx_std, tpr->rx_std_mapping);
7749 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
7750 tpr->rx_jmb, tpr->rx_jmb_mapping);
7755 static int tg3_rx_prodring_init(struct tg3 *tp,
7756 struct tg3_rx_prodring_set *tpr)
7758 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
7760 if (!tpr->rx_std_buffers)
7763 tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
7764 TG3_RX_STD_RING_BYTES(tp),
7765 &tpr->rx_std_mapping,
7770 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7771 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
7773 if (!tpr->rx_jmb_buffers)
7776 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
7777 TG3_RX_JMB_RING_BYTES(tp),
7778 &tpr->rx_jmb_mapping,
7787 tg3_rx_prodring_fini(tp, tpr);
7791 /* Free up pending packets in all rx/tx rings.
7793 * The chip has been shut down and the driver detached from
7794 * the networking, so no interrupts or new tx packets will
7795 * end up in the driver. tp->{tx,}lock is not held and we are not
7796 * in an interrupt context and thus may sleep.
7798 static void tg3_free_rings(struct tg3 *tp)
7802 for (j = 0; j < tp->irq_cnt; j++) {
7803 struct tg3_napi *tnapi = &tp->napi[j];
7805 tg3_rx_prodring_free(tp, &tnapi->prodring);
7807 if (!tnapi->tx_buffers)
7810 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
7811 struct sk_buff *skb = tnapi->tx_buffers[i].skb;
7816 tg3_tx_skb_unmap(tnapi, i,
7817 skb_shinfo(skb)->nr_frags - 1);
7819 dev_kfree_skb_any(skb);
7821 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
7825 /* Initialize tx/rx rings for packet processing.
7827 * The chip has been shut down and the driver detached from
7828 * the networking, so no interrupts or new tx packets will
7829 * end up in the driver. tp->{tx,}lock are held and thus
7832 static int tg3_init_rings(struct tg3 *tp)
7836 /* Free up all the SKBs. */
7839 for (i = 0; i < tp->irq_cnt; i++) {
7840 struct tg3_napi *tnapi = &tp->napi[i];
7842 tnapi->last_tag = 0;
7843 tnapi->last_irq_tag = 0;
7844 tnapi->hw_status->status = 0;
7845 tnapi->hw_status->status_tag = 0;
7846 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7851 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
7853 tnapi->rx_rcb_ptr = 0;
7855 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7857 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
7866 static void tg3_mem_tx_release(struct tg3 *tp)
7870 for (i = 0; i < tp->irq_max; i++) {
7871 struct tg3_napi *tnapi = &tp->napi[i];
7873 if (tnapi->tx_ring) {
7874 dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
7875 tnapi->tx_ring, tnapi->tx_desc_mapping);
7876 tnapi->tx_ring = NULL;
7879 kfree(tnapi->tx_buffers);
7880 tnapi->tx_buffers = NULL;
7884 static int tg3_mem_tx_acquire(struct tg3 *tp)
7887 struct tg3_napi *tnapi = &tp->napi[0];
7889 /* If multivector TSS is enabled, vector 0 does not handle
7890 * tx interrupts. Don't allocate any resources for it.
7892 if (tg3_flag(tp, ENABLE_TSS))
7895 for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
7896 tnapi->tx_buffers = kzalloc(sizeof(struct tg3_tx_ring_info) *
7897 TG3_TX_RING_SIZE, GFP_KERNEL);
7898 if (!tnapi->tx_buffers)
7901 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
7903 &tnapi->tx_desc_mapping,
7905 if (!tnapi->tx_ring)
7912 tg3_mem_tx_release(tp);
7916 static void tg3_mem_rx_release(struct tg3 *tp)
7920 for (i = 0; i < tp->irq_max; i++) {
7921 struct tg3_napi *tnapi = &tp->napi[i];
7923 tg3_rx_prodring_fini(tp, &tnapi->prodring);
7928 dma_free_coherent(&tp->pdev->dev,
7929 TG3_RX_RCB_RING_BYTES(tp),
7931 tnapi->rx_rcb_mapping);
7932 tnapi->rx_rcb = NULL;
7936 static int tg3_mem_rx_acquire(struct tg3 *tp)
7938 unsigned int i, limit;
7940 limit = tp->rxq_cnt;
7942 /* If RSS is enabled, we need a (dummy) producer ring
7943 * set on vector zero. This is the true hw prodring.
7945 if (tg3_flag(tp, ENABLE_RSS))
7948 for (i = 0; i < limit; i++) {
7949 struct tg3_napi *tnapi = &tp->napi[i];
7951 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
7954 /* If multivector RSS is enabled, vector 0
7955 * does not handle rx or tx interrupts.
7956 * Don't allocate any resources for it.
7958 if (!i && tg3_flag(tp, ENABLE_RSS))
7961 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
7962 TG3_RX_RCB_RING_BYTES(tp),
7963 &tnapi->rx_rcb_mapping,
7968 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7974 tg3_mem_rx_release(tp);
7979 * Must not be invoked with interrupt sources disabled and
7980 * the hardware shutdown down.
7982 static void tg3_free_consistent(struct tg3 *tp)
7986 for (i = 0; i < tp->irq_cnt; i++) {
7987 struct tg3_napi *tnapi = &tp->napi[i];
7989 if (tnapi->hw_status) {
7990 dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
7992 tnapi->status_mapping);
7993 tnapi->hw_status = NULL;
7997 tg3_mem_rx_release(tp);
7998 tg3_mem_tx_release(tp);
8001 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8002 tp->hw_stats, tp->stats_mapping);
8003 tp->hw_stats = NULL;
8008 * Must not be invoked with interrupt sources disabled and
8009 * the hardware shutdown down. Can sleep.
8011 static int tg3_alloc_consistent(struct tg3 *tp)
8015 tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
8016 sizeof(struct tg3_hw_stats),
8022 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
8024 for (i = 0; i < tp->irq_cnt; i++) {
8025 struct tg3_napi *tnapi = &tp->napi[i];
8026 struct tg3_hw_status *sblk;
8028 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
8030 &tnapi->status_mapping,
8032 if (!tnapi->hw_status)
8035 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8036 sblk = tnapi->hw_status;
8038 if (tg3_flag(tp, ENABLE_RSS)) {
8039 u16 *prodptr = NULL;
8042 * When RSS is enabled, the status block format changes
8043 * slightly. The "rx_jumbo_consumer", "reserved",
8044 * and "rx_mini_consumer" members get mapped to the
8045 * other three rx return ring producer indexes.
8049 prodptr = &sblk->idx[0].rx_producer;
8052 prodptr = &sblk->rx_jumbo_consumer;
8055 prodptr = &sblk->reserved;
8058 prodptr = &sblk->rx_mini_consumer;
8061 tnapi->rx_rcb_prod_idx = prodptr;
8063 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
8067 if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
8073 tg3_free_consistent(tp);
8077 #define MAX_WAIT_CNT 1000
8079 /* To stop a block, clear the enable bit and poll till it
8080 * clears. tp->lock is held.
8082 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
8087 if (tg3_flag(tp, 5705_PLUS)) {
8094 /* We can't enable/disable these bits of the
8095 * 5705/5750, just say success.
8108 for (i = 0; i < MAX_WAIT_CNT; i++) {
8111 if ((val & enable_bit) == 0)
8115 if (i == MAX_WAIT_CNT && !silent) {
8116 dev_err(&tp->pdev->dev,
8117 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8125 /* tp->lock is held. */
8126 static int tg3_abort_hw(struct tg3 *tp, int silent)
8130 tg3_disable_ints(tp);
8132 tp->rx_mode &= ~RX_MODE_ENABLE;
8133 tw32_f(MAC_RX_MODE, tp->rx_mode);
8136 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
8137 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
8138 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
8139 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
8140 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
8141 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
8143 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
8144 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
8145 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
8146 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
8147 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
8148 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
8149 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
8151 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8152 tw32_f(MAC_MODE, tp->mac_mode);
8155 tp->tx_mode &= ~TX_MODE_ENABLE;
8156 tw32_f(MAC_TX_MODE, tp->tx_mode);
8158 for (i = 0; i < MAX_WAIT_CNT; i++) {
8160 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
8163 if (i >= MAX_WAIT_CNT) {
8164 dev_err(&tp->pdev->dev,
8165 "%s timed out, TX_MODE_ENABLE will not clear "
8166 "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
8170 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
8171 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
8172 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
8174 tw32(FTQ_RESET, 0xffffffff);
8175 tw32(FTQ_RESET, 0x00000000);
8177 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
8178 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
8180 for (i = 0; i < tp->irq_cnt; i++) {
8181 struct tg3_napi *tnapi = &tp->napi[i];
8182 if (tnapi->hw_status)
8183 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8189 /* Save PCI command register before chip reset */
8190 static void tg3_save_pci_state(struct tg3 *tp)
8192 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
8195 /* Restore PCI state after chip reset */
8196 static void tg3_restore_pci_state(struct tg3 *tp)
8200 /* Re-enable indirect register accesses. */
8201 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8202 tp->misc_host_ctrl);
8204 /* Set MAX PCI retry to zero. */
8205 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
8206 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
8207 tg3_flag(tp, PCIX_MODE))
8208 val |= PCISTATE_RETRY_SAME_DMA;
8209 /* Allow reads and writes to the APE register and memory space. */
8210 if (tg3_flag(tp, ENABLE_APE))
8211 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8212 PCISTATE_ALLOW_APE_SHMEM_WR |
8213 PCISTATE_ALLOW_APE_PSPACE_WR;
8214 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
8216 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
8218 if (!tg3_flag(tp, PCI_EXPRESS)) {
8219 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
8220 tp->pci_cacheline_sz);
8221 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
8225 /* Make sure PCI-X relaxed ordering bit is clear. */
8226 if (tg3_flag(tp, PCIX_MODE)) {
8229 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8231 pcix_cmd &= ~PCI_X_CMD_ERO;
8232 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8236 if (tg3_flag(tp, 5780_CLASS)) {
8238 /* Chip reset on 5780 will reset MSI enable bit,
8239 * so need to restore it.
8241 if (tg3_flag(tp, USING_MSI)) {
8244 pci_read_config_word(tp->pdev,
8245 tp->msi_cap + PCI_MSI_FLAGS,
8247 pci_write_config_word(tp->pdev,
8248 tp->msi_cap + PCI_MSI_FLAGS,
8249 ctrl | PCI_MSI_FLAGS_ENABLE);
8250 val = tr32(MSGINT_MODE);
8251 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
8256 /* tp->lock is held. */
8257 static int tg3_chip_reset(struct tg3 *tp)
8260 void (*write_op)(struct tg3 *, u32, u32);
8265 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
8267 /* No matching tg3_nvram_unlock() after this because
8268 * chip reset below will undo the nvram lock.
8270 tp->nvram_lock_cnt = 0;
8272 /* GRC_MISC_CFG core clock reset will clear the memory
8273 * enable bit in PCI register 4 and the MSI enable bit
8274 * on some chips, so we save relevant registers here.
8276 tg3_save_pci_state(tp);
8278 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
8279 tg3_flag(tp, 5755_PLUS))
8280 tw32(GRC_FASTBOOT_PC, 0);
8283 * We must avoid the readl() that normally takes place.
8284 * It locks machines, causes machine checks, and other
8285 * fun things. So, temporarily disable the 5701
8286 * hardware workaround, while we do the reset.
8288 write_op = tp->write32;
8289 if (write_op == tg3_write_flush_reg32)
8290 tp->write32 = tg3_write32;
8292 /* Prevent the irq handler from reading or writing PCI registers
8293 * during chip reset when the memory enable bit in the PCI command
8294 * register may be cleared. The chip does not generate interrupt
8295 * at this time, but the irq handler may still be called due to irq
8296 * sharing or irqpoll.
8298 tg3_flag_set(tp, CHIP_RESETTING);
8299 for (i = 0; i < tp->irq_cnt; i++) {
8300 struct tg3_napi *tnapi = &tp->napi[i];
8301 if (tnapi->hw_status) {
8302 tnapi->hw_status->status = 0;
8303 tnapi->hw_status->status_tag = 0;
8305 tnapi->last_tag = 0;
8306 tnapi->last_irq_tag = 0;
8310 for (i = 0; i < tp->irq_cnt; i++)
8311 synchronize_irq(tp->napi[i].irq_vec);
8313 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
8314 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8315 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8319 val = GRC_MISC_CFG_CORECLK_RESET;
8321 if (tg3_flag(tp, PCI_EXPRESS)) {
8322 /* Force PCIe 1.0a mode */
8323 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
8324 !tg3_flag(tp, 57765_PLUS) &&
8325 tr32(TG3_PCIE_PHY_TSTCTL) ==
8326 (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
8327 tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
8329 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
8330 tw32(GRC_MISC_CFG, (1 << 29));
8335 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
8336 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
8337 tw32(GRC_VCPU_EXT_CTRL,
8338 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
8341 /* Manage gphy power for all CPMU absent PCIe devices. */
8342 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
8343 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
8345 tw32(GRC_MISC_CFG, val);
8347 /* restore 5701 hardware bug workaround write method */
8348 tp->write32 = write_op;
8350 /* Unfortunately, we have to delay before the PCI read back.
8351 * Some 575X chips even will not respond to a PCI cfg access
8352 * when the reset command is given to the chip.
8354 * How do these hardware designers expect things to work
8355 * properly if the PCI write is posted for a long period
8356 * of time? It is always necessary to have some method by
8357 * which a register read back can occur to push the write
8358 * out which does the reset.
8360 * For most tg3 variants the trick below was working.
8365 /* Flush PCI posted writes. The normal MMIO registers
8366 * are inaccessible at this time so this is the only
8367 * way to make this reliably (actually, this is no longer
8368 * the case, see above). I tried to use indirect
8369 * register read/write but this upset some 5701 variants.
8371 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
8375 if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
8378 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
8382 /* Wait for link training to complete. */
8383 for (j = 0; j < 5000; j++)
8386 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
8387 pci_write_config_dword(tp->pdev, 0xc4,
8388 cfg_val | (1 << 15));
8391 /* Clear the "no snoop" and "relaxed ordering" bits. */
8392 val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
8394 * Older PCIe devices only support the 128 byte
8395 * MPS setting. Enforce the restriction.
8397 if (!tg3_flag(tp, CPMU_PRESENT))
8398 val16 |= PCI_EXP_DEVCTL_PAYLOAD;
8399 pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
8401 /* Clear error status */
8402 pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
8403 PCI_EXP_DEVSTA_CED |
8404 PCI_EXP_DEVSTA_NFED |
8405 PCI_EXP_DEVSTA_FED |
8406 PCI_EXP_DEVSTA_URD);
8409 tg3_restore_pci_state(tp);
8411 tg3_flag_clear(tp, CHIP_RESETTING);
8412 tg3_flag_clear(tp, ERROR_PROCESSED);
8415 if (tg3_flag(tp, 5780_CLASS))
8416 val = tr32(MEMARB_MODE);
8417 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
8419 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
8421 tw32(0x5000, 0x400);
8424 tw32(GRC_MODE, tp->grc_mode);
8426 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
8429 tw32(0xc4, val | (1 << 15));
8432 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
8433 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
8434 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
8435 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
8436 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
8437 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8440 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8441 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
8443 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8444 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
8449 tw32_f(MAC_MODE, val);
8452 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
8454 err = tg3_poll_fw(tp);
8460 if (tg3_flag(tp, PCI_EXPRESS) &&
8461 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
8462 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
8463 !tg3_flag(tp, 57765_PLUS)) {
8466 tw32(0x7c00, val | (1 << 25));
8469 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8470 val = tr32(TG3_CPMU_CLCK_ORIDE);
8471 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
8474 /* Reprobe ASF enable state. */
8475 tg3_flag_clear(tp, ENABLE_ASF);
8476 tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
8477 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
8478 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
8481 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
8482 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
8483 tg3_flag_set(tp, ENABLE_ASF);
8484 tp->last_event_jiffies = jiffies;
8485 if (tg3_flag(tp, 5750_PLUS))
8486 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
8493 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
8494 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
8496 /* tp->lock is held. */
8497 static int tg3_halt(struct tg3 *tp, int kind, int silent)
8503 tg3_write_sig_pre_reset(tp, kind);
8505 tg3_abort_hw(tp, silent);
8506 err = tg3_chip_reset(tp);
8508 __tg3_set_mac_addr(tp, 0);
8510 tg3_write_sig_legacy(tp, kind);
8511 tg3_write_sig_post_reset(tp, kind);
8514 /* Save the stats across chip resets... */
8515 tg3_get_nstats(tp, &tp->net_stats_prev);
8516 tg3_get_estats(tp, &tp->estats_prev);
8518 /* And make sure the next sample is new data */
8519 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
8528 static int tg3_set_mac_addr(struct net_device *dev, void *p)
8530 struct tg3 *tp = netdev_priv(dev);
8531 struct sockaddr *addr = p;
8532 int err = 0, skip_mac_1 = 0;
8534 if (!is_valid_ether_addr(addr->sa_data))
8535 return -EADDRNOTAVAIL;
8537 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
8539 if (!netif_running(dev))
8542 if (tg3_flag(tp, ENABLE_ASF)) {
8543 u32 addr0_high, addr0_low, addr1_high, addr1_low;
8545 addr0_high = tr32(MAC_ADDR_0_HIGH);
8546 addr0_low = tr32(MAC_ADDR_0_LOW);
8547 addr1_high = tr32(MAC_ADDR_1_HIGH);
8548 addr1_low = tr32(MAC_ADDR_1_LOW);
8550 /* Skip MAC addr 1 if ASF is using it. */
8551 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
8552 !(addr1_high == 0 && addr1_low == 0))
8555 spin_lock_bh(&tp->lock);
8556 __tg3_set_mac_addr(tp, skip_mac_1);
8557 spin_unlock_bh(&tp->lock);
8562 /* tp->lock is held. */
8563 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
8564 dma_addr_t mapping, u32 maxlen_flags,
8568 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
8569 ((u64) mapping >> 32));
8571 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
8572 ((u64) mapping & 0xffffffff));
8574 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
8577 if (!tg3_flag(tp, 5705_PLUS))
8579 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
8584 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
8588 if (!tg3_flag(tp, ENABLE_TSS)) {
8589 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
8590 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
8591 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
8593 tw32(HOSTCC_TXCOL_TICKS, 0);
8594 tw32(HOSTCC_TXMAX_FRAMES, 0);
8595 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
8597 for (; i < tp->txq_cnt; i++) {
8600 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
8601 tw32(reg, ec->tx_coalesce_usecs);
8602 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
8603 tw32(reg, ec->tx_max_coalesced_frames);
8604 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
8605 tw32(reg, ec->tx_max_coalesced_frames_irq);
8609 for (; i < tp->irq_max - 1; i++) {
8610 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
8611 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
8612 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8616 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
8619 u32 limit = tp->rxq_cnt;
8621 if (!tg3_flag(tp, ENABLE_RSS)) {
8622 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
8623 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
8624 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
8627 tw32(HOSTCC_RXCOL_TICKS, 0);
8628 tw32(HOSTCC_RXMAX_FRAMES, 0);
8629 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
8632 for (; i < limit; i++) {
8635 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
8636 tw32(reg, ec->rx_coalesce_usecs);
8637 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
8638 tw32(reg, ec->rx_max_coalesced_frames);
8639 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
8640 tw32(reg, ec->rx_max_coalesced_frames_irq);
8643 for (; i < tp->irq_max - 1; i++) {
8644 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
8645 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
8646 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8650 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
8652 tg3_coal_tx_init(tp, ec);
8653 tg3_coal_rx_init(tp, ec);
8655 if (!tg3_flag(tp, 5705_PLUS)) {
8656 u32 val = ec->stats_block_coalesce_usecs;
8658 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
8659 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
8664 tw32(HOSTCC_STAT_COAL_TICKS, val);
8668 /* tp->lock is held. */
8669 static void tg3_rings_reset(struct tg3 *tp)
8672 u32 stblk, txrcb, rxrcb, limit;
8673 struct tg3_napi *tnapi = &tp->napi[0];
8675 /* Disable all transmit rings but the first. */
8676 if (!tg3_flag(tp, 5705_PLUS))
8677 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
8678 else if (tg3_flag(tp, 5717_PLUS))
8679 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
8680 else if (tg3_flag(tp, 57765_CLASS) ||
8681 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
8682 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
8684 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8686 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8687 txrcb < limit; txrcb += TG3_BDINFO_SIZE)
8688 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
8689 BDINFO_FLAGS_DISABLED);
8692 /* Disable all receive return rings but the first. */
8693 if (tg3_flag(tp, 5717_PLUS))
8694 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
8695 else if (!tg3_flag(tp, 5705_PLUS))
8696 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
8697 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8698 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762 ||
8699 tg3_flag(tp, 57765_CLASS))
8700 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
8702 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8704 for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8705 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
8706 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
8707 BDINFO_FLAGS_DISABLED);
8709 /* Disable interrupts */
8710 tw32_mailbox_f(tp->napi[0].int_mbox, 1);
8711 tp->napi[0].chk_msi_cnt = 0;
8712 tp->napi[0].last_rx_cons = 0;
8713 tp->napi[0].last_tx_cons = 0;
8715 /* Zero mailbox registers. */
8716 if (tg3_flag(tp, SUPPORT_MSIX)) {
8717 for (i = 1; i < tp->irq_max; i++) {
8718 tp->napi[i].tx_prod = 0;
8719 tp->napi[i].tx_cons = 0;
8720 if (tg3_flag(tp, ENABLE_TSS))
8721 tw32_mailbox(tp->napi[i].prodmbox, 0);
8722 tw32_rx_mbox(tp->napi[i].consmbox, 0);
8723 tw32_mailbox_f(tp->napi[i].int_mbox, 1);
8724 tp->napi[i].chk_msi_cnt = 0;
8725 tp->napi[i].last_rx_cons = 0;
8726 tp->napi[i].last_tx_cons = 0;
8728 if (!tg3_flag(tp, ENABLE_TSS))
8729 tw32_mailbox(tp->napi[0].prodmbox, 0);
8731 tp->napi[0].tx_prod = 0;
8732 tp->napi[0].tx_cons = 0;
8733 tw32_mailbox(tp->napi[0].prodmbox, 0);
8734 tw32_rx_mbox(tp->napi[0].consmbox, 0);
8737 /* Make sure the NIC-based send BD rings are disabled. */
8738 if (!tg3_flag(tp, 5705_PLUS)) {
8739 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
8740 for (i = 0; i < 16; i++)
8741 tw32_tx_mbox(mbox + i * 8, 0);
8744 txrcb = NIC_SRAM_SEND_RCB;
8745 rxrcb = NIC_SRAM_RCV_RET_RCB;
8747 /* Clear status block in ram. */
8748 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8750 /* Set status block DMA address */
8751 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8752 ((u64) tnapi->status_mapping >> 32));
8753 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8754 ((u64) tnapi->status_mapping & 0xffffffff));
8756 if (tnapi->tx_ring) {
8757 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8758 (TG3_TX_RING_SIZE <<
8759 BDINFO_FLAGS_MAXLEN_SHIFT),
8760 NIC_SRAM_TX_BUFFER_DESC);
8761 txrcb += TG3_BDINFO_SIZE;
8764 if (tnapi->rx_rcb) {
8765 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8766 (tp->rx_ret_ring_mask + 1) <<
8767 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
8768 rxrcb += TG3_BDINFO_SIZE;
8771 stblk = HOSTCC_STATBLCK_RING1;
8773 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
8774 u64 mapping = (u64)tnapi->status_mapping;
8775 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
8776 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
8778 /* Clear status block in ram. */
8779 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8781 if (tnapi->tx_ring) {
8782 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8783 (TG3_TX_RING_SIZE <<
8784 BDINFO_FLAGS_MAXLEN_SHIFT),
8785 NIC_SRAM_TX_BUFFER_DESC);
8786 txrcb += TG3_BDINFO_SIZE;
8789 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8790 ((tp->rx_ret_ring_mask + 1) <<
8791 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
8794 rxrcb += TG3_BDINFO_SIZE;
8798 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
8800 u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
8802 if (!tg3_flag(tp, 5750_PLUS) ||
8803 tg3_flag(tp, 5780_CLASS) ||
8804 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
8805 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
8806 tg3_flag(tp, 57765_PLUS))
8807 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
8808 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8809 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8810 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
8812 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
8814 nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
8815 host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
8817 val = min(nic_rep_thresh, host_rep_thresh);
8818 tw32(RCVBDI_STD_THRESH, val);
8820 if (tg3_flag(tp, 57765_PLUS))
8821 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
8823 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8826 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
8828 host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
8830 val = min(bdcache_maxcnt / 2, host_rep_thresh);
8831 tw32(RCVBDI_JUMBO_THRESH, val);
8833 if (tg3_flag(tp, 57765_PLUS))
8834 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
8837 static inline u32 calc_crc(unsigned char *buf, int len)
8845 for (j = 0; j < len; j++) {
8848 for (k = 0; k < 8; k++) {
8861 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
8863 /* accept or reject all multicast frames */
8864 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
8865 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
8866 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
8867 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
8870 static void __tg3_set_rx_mode(struct net_device *dev)
8872 struct tg3 *tp = netdev_priv(dev);
8875 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
8876 RX_MODE_KEEP_VLAN_TAG);
8878 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
8879 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
8882 if (!tg3_flag(tp, ENABLE_ASF))
8883 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
8886 if (dev->flags & IFF_PROMISC) {
8887 /* Promiscuous mode. */
8888 rx_mode |= RX_MODE_PROMISC;
8889 } else if (dev->flags & IFF_ALLMULTI) {
8890 /* Accept all multicast. */
8891 tg3_set_multi(tp, 1);
8892 } else if (netdev_mc_empty(dev)) {
8893 /* Reject all multicast. */
8894 tg3_set_multi(tp, 0);
8896 /* Accept one or more multicast(s). */
8897 struct netdev_hw_addr *ha;
8898 u32 mc_filter[4] = { 0, };
8903 netdev_for_each_mc_addr(ha, dev) {
8904 crc = calc_crc(ha->addr, ETH_ALEN);
8906 regidx = (bit & 0x60) >> 5;
8908 mc_filter[regidx] |= (1 << bit);
8911 tw32(MAC_HASH_REG_0, mc_filter[0]);
8912 tw32(MAC_HASH_REG_1, mc_filter[1]);
8913 tw32(MAC_HASH_REG_2, mc_filter[2]);
8914 tw32(MAC_HASH_REG_3, mc_filter[3]);
8917 if (rx_mode != tp->rx_mode) {
8918 tp->rx_mode = rx_mode;
8919 tw32_f(MAC_RX_MODE, rx_mode);
8924 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
8928 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
8929 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
8932 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
8936 if (!tg3_flag(tp, SUPPORT_MSIX))
8939 if (tp->rxq_cnt == 1) {
8940 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
8944 /* Validate table against current IRQ count */
8945 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
8946 if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
8950 if (i != TG3_RSS_INDIR_TBL_SIZE)
8951 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
8954 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
8957 u32 reg = MAC_RSS_INDIR_TBL_0;
8959 while (i < TG3_RSS_INDIR_TBL_SIZE) {
8960 u32 val = tp->rss_ind_tbl[i];
8962 for (; i % 8; i++) {
8964 val |= tp->rss_ind_tbl[i];
8971 /* tp->lock is held. */
8972 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8974 u32 val, rdmac_mode;
8976 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
8978 tg3_disable_ints(tp);
8982 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
8984 if (tg3_flag(tp, INIT_COMPLETE))
8985 tg3_abort_hw(tp, 1);
8987 /* Enable MAC control of LPI */
8988 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
8989 val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
8990 TG3_CPMU_EEE_LNKIDL_UART_IDL;
8991 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
8992 val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
8994 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
8996 tw32_f(TG3_CPMU_EEE_CTRL,
8997 TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
8999 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
9000 TG3_CPMU_EEEMD_LPI_IN_TX |
9001 TG3_CPMU_EEEMD_LPI_IN_RX |
9002 TG3_CPMU_EEEMD_EEE_ENABLE;
9004 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
9005 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
9007 if (tg3_flag(tp, ENABLE_APE))
9008 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
9010 tw32_f(TG3_CPMU_EEE_MODE, val);
9012 tw32_f(TG3_CPMU_EEE_DBTMR1,
9013 TG3_CPMU_DBTMR1_PCIEXIT_2047US |
9014 TG3_CPMU_DBTMR1_LNKIDLE_2047US);
9016 tw32_f(TG3_CPMU_EEE_DBTMR2,
9017 TG3_CPMU_DBTMR2_APE_TX_2047US |
9018 TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
9024 err = tg3_chip_reset(tp);
9028 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9030 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
9031 val = tr32(TG3_CPMU_CTRL);
9032 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9033 tw32(TG3_CPMU_CTRL, val);
9035 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9036 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9037 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9038 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9040 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9041 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9042 val |= CPMU_LNK_AWARE_MACCLK_6_25;
9043 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9045 val = tr32(TG3_CPMU_HST_ACC);
9046 val &= ~CPMU_HST_ACC_MACCLK_MASK;
9047 val |= CPMU_HST_ACC_MACCLK_6_25;
9048 tw32(TG3_CPMU_HST_ACC, val);
9051 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
9052 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
9053 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
9054 PCIE_PWR_MGMT_L1_THRESH_4MS;
9055 tw32(PCIE_PWR_MGMT_THRESH, val);
9057 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
9058 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
9060 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
9062 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9063 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9066 if (tg3_flag(tp, L1PLLPD_EN)) {
9067 u32 grc_mode = tr32(GRC_MODE);
9069 /* Access the lower 1K of PL PCIE block registers. */
9070 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9071 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9073 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
9074 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
9075 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
9077 tw32(GRC_MODE, grc_mode);
9080 if (tg3_flag(tp, 57765_CLASS)) {
9081 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
9082 u32 grc_mode = tr32(GRC_MODE);
9084 /* Access the lower 1K of PL PCIE block registers. */
9085 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9086 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9088 val = tr32(TG3_PCIE_TLDLPL_PORT +
9089 TG3_PCIE_PL_LO_PHYCTL5);
9090 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
9091 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
9093 tw32(GRC_MODE, grc_mode);
9096 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
9097 u32 grc_mode = tr32(GRC_MODE);
9099 /* Access the lower 1K of DL PCIE block registers. */
9100 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9101 tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
9103 val = tr32(TG3_PCIE_TLDLPL_PORT +
9104 TG3_PCIE_DL_LO_FTSMAX);
9105 val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
9106 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
9107 val | TG3_PCIE_DL_LO_FTSMAX_VAL);
9109 tw32(GRC_MODE, grc_mode);
9112 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9113 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9114 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9115 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9118 /* This works around an issue with Athlon chipsets on
9119 * B3 tigon3 silicon. This bit has no effect on any
9120 * other revision. But do not set this on PCI Express
9121 * chips and don't even touch the clocks if the CPMU is present.
9123 if (!tg3_flag(tp, CPMU_PRESENT)) {
9124 if (!tg3_flag(tp, PCI_EXPRESS))
9125 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
9126 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9129 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
9130 tg3_flag(tp, PCIX_MODE)) {
9131 val = tr32(TG3PCI_PCISTATE);
9132 val |= PCISTATE_RETRY_SAME_DMA;
9133 tw32(TG3PCI_PCISTATE, val);
9136 if (tg3_flag(tp, ENABLE_APE)) {
9137 /* Allow reads and writes to the
9138 * APE register and memory space.
9140 val = tr32(TG3PCI_PCISTATE);
9141 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
9142 PCISTATE_ALLOW_APE_SHMEM_WR |
9143 PCISTATE_ALLOW_APE_PSPACE_WR;
9144 tw32(TG3PCI_PCISTATE, val);
9147 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
9148 /* Enable some hw fixes. */
9149 val = tr32(TG3PCI_MSI_DATA);
9150 val |= (1 << 26) | (1 << 28) | (1 << 29);
9151 tw32(TG3PCI_MSI_DATA, val);
9154 /* Descriptor ring init may make accesses to the
9155 * NIC SRAM area to setup the TX descriptors, so we
9156 * can only do this after the hardware has been
9157 * successfully reset.
9159 err = tg3_init_rings(tp);
9163 if (tg3_flag(tp, 57765_PLUS)) {
9164 val = tr32(TG3PCI_DMA_RW_CTRL) &
9165 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
9166 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
9167 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
9168 if (!tg3_flag(tp, 57765_CLASS) &&
9169 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9170 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5762)
9171 val |= DMA_RWCTRL_TAGGED_STAT_WA;
9172 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
9173 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
9174 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
9175 /* This value is determined during the probe time DMA
9176 * engine test, tg3_test_dma.
9178 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
9181 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
9182 GRC_MODE_4X_NIC_SEND_RINGS |
9183 GRC_MODE_NO_TX_PHDR_CSUM |
9184 GRC_MODE_NO_RX_PHDR_CSUM);
9185 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
9187 /* Pseudo-header checksum is done by hardware logic and not
9188 * the offload processers, so make the chip do the pseudo-
9189 * header checksums on receive. For transmit it is more
9190 * convenient to do the pseudo-header checksum in software
9191 * as Linux does that on transmit for us in all cases.
9193 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
9195 val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
9197 tw32(TG3_RX_PTP_CTL,
9198 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
9200 if (tg3_flag(tp, PTP_CAPABLE))
9201 val |= GRC_MODE_TIME_SYNC_ENABLE;
9203 tw32(GRC_MODE, tp->grc_mode | val);
9205 /* Setup the timer prescalar register. Clock is always 66Mhz. */
9206 val = tr32(GRC_MISC_CFG);
9208 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
9209 tw32(GRC_MISC_CFG, val);
9211 /* Initialize MBUF/DESC pool. */
9212 if (tg3_flag(tp, 5750_PLUS)) {
9214 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
9215 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
9216 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
9217 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
9219 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
9220 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
9221 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
9222 } else if (tg3_flag(tp, TSO_CAPABLE)) {
9225 fw_len = tp->fw_len;
9226 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
9227 tw32(BUFMGR_MB_POOL_ADDR,
9228 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
9229 tw32(BUFMGR_MB_POOL_SIZE,
9230 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
9233 if (tp->dev->mtu <= ETH_DATA_LEN) {
9234 tw32(BUFMGR_MB_RDMA_LOW_WATER,
9235 tp->bufmgr_config.mbuf_read_dma_low_water);
9236 tw32(BUFMGR_MB_MACRX_LOW_WATER,
9237 tp->bufmgr_config.mbuf_mac_rx_low_water);
9238 tw32(BUFMGR_MB_HIGH_WATER,
9239 tp->bufmgr_config.mbuf_high_water);
9241 tw32(BUFMGR_MB_RDMA_LOW_WATER,
9242 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
9243 tw32(BUFMGR_MB_MACRX_LOW_WATER,
9244 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
9245 tw32(BUFMGR_MB_HIGH_WATER,
9246 tp->bufmgr_config.mbuf_high_water_jumbo);
9248 tw32(BUFMGR_DMA_LOW_WATER,
9249 tp->bufmgr_config.dma_low_water);
9250 tw32(BUFMGR_DMA_HIGH_WATER,
9251 tp->bufmgr_config.dma_high_water);
9253 val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
9254 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
9255 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
9256 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
9257 tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
9258 tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
9259 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
9260 tw32(BUFMGR_MODE, val);
9261 for (i = 0; i < 2000; i++) {
9262 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
9267 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
9271 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
9272 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
9274 tg3_setup_rxbd_thresholds(tp);
9276 /* Initialize TG3_BDINFO's at:
9277 * RCVDBDI_STD_BD: standard eth size rx ring
9278 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
9279 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
9282 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
9283 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
9284 * ring attribute flags
9285 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
9287 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
9288 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
9290 * The size of each ring is fixed in the firmware, but the location is
9293 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
9294 ((u64) tpr->rx_std_mapping >> 32));
9295 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
9296 ((u64) tpr->rx_std_mapping & 0xffffffff));
9297 if (!tg3_flag(tp, 5717_PLUS))
9298 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
9299 NIC_SRAM_RX_BUFFER_DESC);
9301 /* Disable the mini ring */
9302 if (!tg3_flag(tp, 5705_PLUS))
9303 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
9304 BDINFO_FLAGS_DISABLED);
9306 /* Program the jumbo buffer descriptor ring control
9307 * blocks on those devices that have them.
9309 if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
9310 (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
9312 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
9313 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
9314 ((u64) tpr->rx_jmb_mapping >> 32));
9315 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
9316 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
9317 val = TG3_RX_JMB_RING_SIZE(tp) <<
9318 BDINFO_FLAGS_MAXLEN_SHIFT;
9319 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
9320 val | BDINFO_FLAGS_USE_EXT_RECV);
9321 if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
9322 tg3_flag(tp, 57765_CLASS) ||
9323 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
9324 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
9325 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
9327 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
9328 BDINFO_FLAGS_DISABLED);
9331 if (tg3_flag(tp, 57765_PLUS)) {
9332 val = TG3_RX_STD_RING_SIZE(tp);
9333 val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
9334 val |= (TG3_RX_STD_DMA_SZ << 2);
9336 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
9338 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
9340 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
9342 tpr->rx_std_prod_idx = tp->rx_pending;
9343 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
9345 tpr->rx_jmb_prod_idx =
9346 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
9347 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
9349 tg3_rings_reset(tp);
9351 /* Initialize MAC address and backoff seed. */
9352 __tg3_set_mac_addr(tp, 0);
9354 /* MTU + ethernet header + FCS + optional VLAN tag */
9355 tw32(MAC_RX_MTU_SIZE,
9356 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
9358 /* The slot time is changed by tg3_setup_phy if we
9359 * run at gigabit with half duplex.
9361 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
9362 (6 << TX_LENGTHS_IPG_SHIFT) |
9363 (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
9365 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
9366 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
9367 val |= tr32(MAC_TX_LENGTHS) &
9368 (TX_LENGTHS_JMB_FRM_LEN_MSK |
9369 TX_LENGTHS_CNT_DWN_VAL_MSK);
9371 tw32(MAC_TX_LENGTHS, val);
9373 /* Receive rules. */
9374 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
9375 tw32(RCVLPC_CONFIG, 0x0181);
9377 /* Calculate RDMAC_MODE setting early, we need it to determine
9378 * the RCVLPC_STATE_ENABLE mask.
9380 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
9381 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
9382 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
9383 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
9384 RDMAC_MODE_LNGREAD_ENAB);
9386 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
9387 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
9389 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9390 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
9391 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
9392 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
9393 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
9394 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
9396 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9397 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
9398 if (tg3_flag(tp, TSO_CAPABLE) &&
9399 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
9400 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
9401 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
9402 !tg3_flag(tp, IS_5788)) {
9403 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9407 if (tg3_flag(tp, PCI_EXPRESS))
9408 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9410 if (tg3_flag(tp, HW_TSO_1) ||
9411 tg3_flag(tp, HW_TSO_2) ||
9412 tg3_flag(tp, HW_TSO_3))
9413 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
9415 if (tg3_flag(tp, 57765_PLUS) ||
9416 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
9417 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
9418 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
9420 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
9421 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
9422 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
9424 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9425 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9426 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
9427 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
9428 tg3_flag(tp, 57765_PLUS)) {
9431 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
9432 tgtreg = TG3_RDMA_RSRVCTRL_REG2;
9434 tgtreg = TG3_RDMA_RSRVCTRL_REG;
9437 if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
9438 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762) {
9439 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
9440 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
9441 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
9442 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
9443 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
9444 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
9446 tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
9449 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
9450 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
9451 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762) {
9454 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
9455 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
9457 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
9461 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
9462 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
9465 /* Receive/send statistics. */
9466 if (tg3_flag(tp, 5750_PLUS)) {
9467 val = tr32(RCVLPC_STATS_ENABLE);
9468 val &= ~RCVLPC_STATSENAB_DACK_FIX;
9469 tw32(RCVLPC_STATS_ENABLE, val);
9470 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
9471 tg3_flag(tp, TSO_CAPABLE)) {
9472 val = tr32(RCVLPC_STATS_ENABLE);
9473 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
9474 tw32(RCVLPC_STATS_ENABLE, val);
9476 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
9478 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
9479 tw32(SNDDATAI_STATSENAB, 0xffffff);
9480 tw32(SNDDATAI_STATSCTRL,
9481 (SNDDATAI_SCTRL_ENABLE |
9482 SNDDATAI_SCTRL_FASTUPD));
9484 /* Setup host coalescing engine. */
9485 tw32(HOSTCC_MODE, 0);
9486 for (i = 0; i < 2000; i++) {
9487 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
9492 __tg3_set_coalesce(tp, &tp->coal);
9494 if (!tg3_flag(tp, 5705_PLUS)) {
9495 /* Status/statistics block address. See tg3_timer,
9496 * the tg3_periodic_fetch_stats call there, and
9497 * tg3_get_stats to see how this works for 5705/5750 chips.
9499 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9500 ((u64) tp->stats_mapping >> 32));
9501 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9502 ((u64) tp->stats_mapping & 0xffffffff));
9503 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
9505 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
9507 /* Clear statistics and status block memory areas */
9508 for (i = NIC_SRAM_STATS_BLK;
9509 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
9511 tg3_write_mem(tp, i, 0);
9516 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
9518 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
9519 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
9520 if (!tg3_flag(tp, 5705_PLUS))
9521 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
9523 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9524 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
9525 /* reset to prevent losing 1st rx packet intermittently */
9526 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9530 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
9531 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
9532 MAC_MODE_FHDE_ENABLE;
9533 if (tg3_flag(tp, ENABLE_APE))
9534 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
9535 if (!tg3_flag(tp, 5705_PLUS) &&
9536 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9537 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
9538 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
9539 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
9542 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
9543 * If TG3_FLAG_IS_NIC is zero, we should read the
9544 * register to preserve the GPIO settings for LOMs. The GPIOs,
9545 * whether used as inputs or outputs, are set by boot code after
9548 if (!tg3_flag(tp, IS_NIC)) {
9551 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
9552 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
9553 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
9555 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9556 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
9557 GRC_LCLCTRL_GPIO_OUTPUT3;
9559 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
9560 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
9562 tp->grc_local_ctrl &= ~gpio_mask;
9563 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
9565 /* GPIO1 must be driven high for eeprom write protect */
9566 if (tg3_flag(tp, EEPROM_WRITE_PROT))
9567 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
9568 GRC_LCLCTRL_GPIO_OUTPUT1);
9570 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9573 if (tg3_flag(tp, USING_MSIX)) {
9574 val = tr32(MSGINT_MODE);
9575 val |= MSGINT_MODE_ENABLE;
9576 if (tp->irq_cnt > 1)
9577 val |= MSGINT_MODE_MULTIVEC_EN;
9578 if (!tg3_flag(tp, 1SHOT_MSI))
9579 val |= MSGINT_MODE_ONE_SHOT_DISABLE;
9580 tw32(MSGINT_MODE, val);
9583 if (!tg3_flag(tp, 5705_PLUS)) {
9584 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
9588 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
9589 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
9590 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
9591 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
9592 WDMAC_MODE_LNGREAD_ENAB);
9594 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9595 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
9596 if (tg3_flag(tp, TSO_CAPABLE) &&
9597 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
9598 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
9600 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
9601 !tg3_flag(tp, IS_5788)) {
9602 val |= WDMAC_MODE_RX_ACCEL;
9606 /* Enable host coalescing bug fix */
9607 if (tg3_flag(tp, 5755_PLUS))
9608 val |= WDMAC_MODE_STATUS_TAG_FIX;
9610 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
9611 val |= WDMAC_MODE_BURST_ALL_DATA;
9613 tw32_f(WDMAC_MODE, val);
9616 if (tg3_flag(tp, PCIX_MODE)) {
9619 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9621 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
9622 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
9623 pcix_cmd |= PCI_X_CMD_READ_2K;
9624 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
9625 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
9626 pcix_cmd |= PCI_X_CMD_READ_2K;
9628 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9632 tw32_f(RDMAC_MODE, rdmac_mode);
9635 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
9636 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
9637 if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
9640 if (i < TG3_NUM_RDMA_CHANNELS) {
9641 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
9642 val |= TG3_LSO_RD_DMA_TX_LENGTH_WA;
9643 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
9644 tg3_flag_set(tp, 5719_RDMA_BUG);
9648 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
9649 if (!tg3_flag(tp, 5705_PLUS))
9650 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
9652 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
9654 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
9656 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
9658 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
9659 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
9660 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
9661 if (tg3_flag(tp, LRG_PROD_RING_CAP))
9662 val |= RCVDBDI_MODE_LRG_RING_SZ;
9663 tw32(RCVDBDI_MODE, val);
9664 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
9665 if (tg3_flag(tp, HW_TSO_1) ||
9666 tg3_flag(tp, HW_TSO_2) ||
9667 tg3_flag(tp, HW_TSO_3))
9668 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
9669 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
9670 if (tg3_flag(tp, ENABLE_TSS))
9671 val |= SNDBDI_MODE_MULTI_TXQ_EN;
9672 tw32(SNDBDI_MODE, val);
9673 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
9675 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9676 err = tg3_load_5701_a0_firmware_fix(tp);
9681 if (tg3_flag(tp, TSO_CAPABLE)) {
9682 err = tg3_load_tso_firmware(tp);
9687 tp->tx_mode = TX_MODE_ENABLE;
9689 if (tg3_flag(tp, 5755_PLUS) ||
9690 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9691 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
9693 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
9694 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762) {
9695 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
9696 tp->tx_mode &= ~val;
9697 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
9700 tw32_f(MAC_TX_MODE, tp->tx_mode);
9703 if (tg3_flag(tp, ENABLE_RSS)) {
9704 tg3_rss_write_indir_tbl(tp);
9706 /* Setup the "secret" hash key. */
9707 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
9708 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
9709 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
9710 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
9711 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
9712 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
9713 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
9714 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
9715 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
9716 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
9719 tp->rx_mode = RX_MODE_ENABLE;
9720 if (tg3_flag(tp, 5755_PLUS))
9721 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
9723 if (tg3_flag(tp, ENABLE_RSS))
9724 tp->rx_mode |= RX_MODE_RSS_ENABLE |
9725 RX_MODE_RSS_ITBL_HASH_BITS_7 |
9726 RX_MODE_RSS_IPV6_HASH_EN |
9727 RX_MODE_RSS_TCP_IPV6_HASH_EN |
9728 RX_MODE_RSS_IPV4_HASH_EN |
9729 RX_MODE_RSS_TCP_IPV4_HASH_EN;
9731 tw32_f(MAC_RX_MODE, tp->rx_mode);
9734 tw32(MAC_LED_CTRL, tp->led_ctrl);
9736 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
9737 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9738 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9741 tw32_f(MAC_RX_MODE, tp->rx_mode);
9744 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9745 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
9746 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
9747 /* Set drive transmission level to 1.2V */
9748 /* only if the signal pre-emphasis bit is not set */
9749 val = tr32(MAC_SERDES_CFG);
9752 tw32(MAC_SERDES_CFG, val);
9754 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
9755 tw32(MAC_SERDES_CFG, 0x616000);
9758 /* Prevent chip from dropping frames when flow control
9761 if (tg3_flag(tp, 57765_CLASS))
9765 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
9767 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
9768 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
9769 /* Use hardware link auto-negotiation */
9770 tg3_flag_set(tp, HW_AUTONEG);
9773 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9774 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
9777 tmp = tr32(SERDES_RX_CTRL);
9778 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
9779 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
9780 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
9781 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9784 if (!tg3_flag(tp, USE_PHYLIB)) {
9785 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9786 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
9788 err = tg3_setup_phy(tp, 0);
9792 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9793 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
9796 /* Clear CRC stats. */
9797 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
9798 tg3_writephy(tp, MII_TG3_TEST1,
9799 tmp | MII_TG3_TEST1_CRC_EN);
9800 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
9805 __tg3_set_rx_mode(tp->dev);
9807 /* Initialize receive rules. */
9808 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
9809 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
9810 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
9811 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
9813 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
9817 if (tg3_flag(tp, ENABLE_ASF))
9821 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
9823 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
9825 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
9827 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
9829 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
9831 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
9833 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
9835 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
9837 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
9839 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
9841 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
9843 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
9845 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
9847 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
9855 if (tg3_flag(tp, ENABLE_APE))
9856 /* Write our heartbeat update interval to APE. */
9857 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
9858 APE_HOST_HEARTBEAT_INT_DISABLE);
9860 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
9865 /* Called at device open time to get the chip ready for
9866 * packet processing. Invoked with tp->lock held.
9868 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
9870 tg3_switch_clocks(tp);
9872 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
9874 return tg3_reset_hw(tp, reset_phy);
9877 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
9881 for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
9882 u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
9884 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
9887 if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
9888 !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
9889 memset(ocir, 0, TG3_OCIR_LEN);
9893 /* sysfs attributes for hwmon */
9894 static ssize_t tg3_show_temp(struct device *dev,
9895 struct device_attribute *devattr, char *buf)
9897 struct pci_dev *pdev = to_pci_dev(dev);
9898 struct net_device *netdev = pci_get_drvdata(pdev);
9899 struct tg3 *tp = netdev_priv(netdev);
9900 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
9903 spin_lock_bh(&tp->lock);
9904 tg3_ape_scratchpad_read(tp, &temperature, attr->index,
9905 sizeof(temperature));
9906 spin_unlock_bh(&tp->lock);
9907 return sprintf(buf, "%u\n", temperature);
9911 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
9912 TG3_TEMP_SENSOR_OFFSET);
9913 static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
9914 TG3_TEMP_CAUTION_OFFSET);
9915 static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
9916 TG3_TEMP_MAX_OFFSET);
9918 static struct attribute *tg3_attributes[] = {
9919 &sensor_dev_attr_temp1_input.dev_attr.attr,
9920 &sensor_dev_attr_temp1_crit.dev_attr.attr,
9921 &sensor_dev_attr_temp1_max.dev_attr.attr,
9925 static const struct attribute_group tg3_group = {
9926 .attrs = tg3_attributes,
9929 static void tg3_hwmon_close(struct tg3 *tp)
9931 if (tp->hwmon_dev) {
9932 hwmon_device_unregister(tp->hwmon_dev);
9933 tp->hwmon_dev = NULL;
9934 sysfs_remove_group(&tp->pdev->dev.kobj, &tg3_group);
9938 static void tg3_hwmon_open(struct tg3 *tp)
9942 struct pci_dev *pdev = tp->pdev;
9943 struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
9945 tg3_sd_scan_scratchpad(tp, ocirs);
9947 for (i = 0; i < TG3_SD_NUM_RECS; i++) {
9948 if (!ocirs[i].src_data_length)
9951 size += ocirs[i].src_hdr_length;
9952 size += ocirs[i].src_data_length;
9958 /* Register hwmon sysfs hooks */
9959 err = sysfs_create_group(&pdev->dev.kobj, &tg3_group);
9961 dev_err(&pdev->dev, "Cannot create sysfs group, aborting\n");
9965 tp->hwmon_dev = hwmon_device_register(&pdev->dev);
9966 if (IS_ERR(tp->hwmon_dev)) {
9967 tp->hwmon_dev = NULL;
9968 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
9969 sysfs_remove_group(&pdev->dev.kobj, &tg3_group);
9974 #define TG3_STAT_ADD32(PSTAT, REG) \
9975 do { u32 __val = tr32(REG); \
9976 (PSTAT)->low += __val; \
9977 if ((PSTAT)->low < __val) \
9978 (PSTAT)->high += 1; \
9981 static void tg3_periodic_fetch_stats(struct tg3 *tp)
9983 struct tg3_hw_stats *sp = tp->hw_stats;
9988 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
9989 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
9990 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
9991 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
9992 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
9993 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
9994 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
9995 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
9996 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
9997 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
9998 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
9999 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
10000 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
10001 if (unlikely(tg3_flag(tp, 5719_RDMA_BUG) &&
10002 (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
10003 sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
10006 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10007 val &= ~TG3_LSO_RD_DMA_TX_LENGTH_WA;
10008 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10009 tg3_flag_clear(tp, 5719_RDMA_BUG);
10012 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
10013 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
10014 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
10015 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
10016 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
10017 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
10018 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
10019 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
10020 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
10021 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
10022 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
10023 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
10024 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
10025 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
10027 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
10028 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
10029 tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
10030 tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
10031 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
10033 u32 val = tr32(HOSTCC_FLOW_ATTN);
10034 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
10036 tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
10037 sp->rx_discards.low += val;
10038 if (sp->rx_discards.low < val)
10039 sp->rx_discards.high += 1;
10041 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
10043 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
10046 static void tg3_chk_missed_msi(struct tg3 *tp)
10050 for (i = 0; i < tp->irq_cnt; i++) {
10051 struct tg3_napi *tnapi = &tp->napi[i];
10053 if (tg3_has_work(tnapi)) {
10054 if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
10055 tnapi->last_tx_cons == tnapi->tx_cons) {
10056 if (tnapi->chk_msi_cnt < 1) {
10057 tnapi->chk_msi_cnt++;
10063 tnapi->chk_msi_cnt = 0;
10064 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
10065 tnapi->last_tx_cons = tnapi->tx_cons;
10069 static void tg3_timer(unsigned long __opaque)
10071 struct tg3 *tp = (struct tg3 *) __opaque;
10073 if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
10074 goto restart_timer;
10076 spin_lock(&tp->lock);
10078 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
10079 tg3_flag(tp, 57765_CLASS))
10080 tg3_chk_missed_msi(tp);
10082 if (!tg3_flag(tp, TAGGED_STATUS)) {
10083 /* All of this garbage is because when using non-tagged
10084 * IRQ status the mailbox/status_block protocol the chip
10085 * uses with the cpu is race prone.
10087 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
10088 tw32(GRC_LOCAL_CTRL,
10089 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
10091 tw32(HOSTCC_MODE, tp->coalesce_mode |
10092 HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
10095 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
10096 spin_unlock(&tp->lock);
10097 tg3_reset_task_schedule(tp);
10098 goto restart_timer;
10102 /* This part only runs once per second. */
10103 if (!--tp->timer_counter) {
10104 if (tg3_flag(tp, 5705_PLUS))
10105 tg3_periodic_fetch_stats(tp);
10107 if (tp->setlpicnt && !--tp->setlpicnt)
10108 tg3_phy_eee_enable(tp);
10110 if (tg3_flag(tp, USE_LINKCHG_REG)) {
10114 mac_stat = tr32(MAC_STATUS);
10117 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
10118 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
10120 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
10124 tg3_setup_phy(tp, 0);
10125 } else if (tg3_flag(tp, POLL_SERDES)) {
10126 u32 mac_stat = tr32(MAC_STATUS);
10127 int need_setup = 0;
10130 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
10133 if (!tp->link_up &&
10134 (mac_stat & (MAC_STATUS_PCS_SYNCED |
10135 MAC_STATUS_SIGNAL_DET))) {
10139 if (!tp->serdes_counter) {
10142 ~MAC_MODE_PORT_MODE_MASK));
10144 tw32_f(MAC_MODE, tp->mac_mode);
10147 tg3_setup_phy(tp, 0);
10149 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10150 tg3_flag(tp, 5780_CLASS)) {
10151 tg3_serdes_parallel_detect(tp);
10154 tp->timer_counter = tp->timer_multiplier;
10157 /* Heartbeat is only sent once every 2 seconds.
10159 * The heartbeat is to tell the ASF firmware that the host
10160 * driver is still alive. In the event that the OS crashes,
10161 * ASF needs to reset the hardware to free up the FIFO space
10162 * that may be filled with rx packets destined for the host.
10163 * If the FIFO is full, ASF will no longer function properly.
10165 * Unintended resets have been reported on real time kernels
10166 * where the timer doesn't run on time. Netpoll will also have
10169 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
10170 * to check the ring condition when the heartbeat is expiring
10171 * before doing the reset. This will prevent most unintended
10174 if (!--tp->asf_counter) {
10175 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
10176 tg3_wait_for_event_ack(tp);
10178 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
10179 FWCMD_NICDRV_ALIVE3);
10180 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
10181 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
10182 TG3_FW_UPDATE_TIMEOUT_SEC);
10184 tg3_generate_fw_event(tp);
10186 tp->asf_counter = tp->asf_multiplier;
10189 spin_unlock(&tp->lock);
10192 tp->timer.expires = jiffies + tp->timer_offset;
10193 add_timer(&tp->timer);
10196 static void tg3_timer_init(struct tg3 *tp)
10198 if (tg3_flag(tp, TAGGED_STATUS) &&
10199 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
10200 !tg3_flag(tp, 57765_CLASS))
10201 tp->timer_offset = HZ;
10203 tp->timer_offset = HZ / 10;
10205 BUG_ON(tp->timer_offset > HZ);
10207 tp->timer_multiplier = (HZ / tp->timer_offset);
10208 tp->asf_multiplier = (HZ / tp->timer_offset) *
10209 TG3_FW_UPDATE_FREQ_SEC;
10211 init_timer(&tp->timer);
10212 tp->timer.data = (unsigned long) tp;
10213 tp->timer.function = tg3_timer;
10216 static void tg3_timer_start(struct tg3 *tp)
10218 tp->asf_counter = tp->asf_multiplier;
10219 tp->timer_counter = tp->timer_multiplier;
10221 tp->timer.expires = jiffies + tp->timer_offset;
10222 add_timer(&tp->timer);
10225 static void tg3_timer_stop(struct tg3 *tp)
10227 del_timer_sync(&tp->timer);
10230 /* Restart hardware after configuration changes, self-test, etc.
10231 * Invoked with tp->lock held.
10233 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
10234 __releases(tp->lock)
10235 __acquires(tp->lock)
10239 err = tg3_init_hw(tp, reset_phy);
10241 netdev_err(tp->dev,
10242 "Failed to re-initialize device, aborting\n");
10243 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10244 tg3_full_unlock(tp);
10245 tg3_timer_stop(tp);
10247 tg3_napi_enable(tp);
10248 dev_close(tp->dev);
10249 tg3_full_lock(tp, 0);
10254 static void tg3_reset_task(struct work_struct *work)
10256 struct tg3 *tp = container_of(work, struct tg3, reset_task);
10259 tg3_full_lock(tp, 0);
10261 if (!netif_running(tp->dev)) {
10262 tg3_flag_clear(tp, RESET_TASK_PENDING);
10263 tg3_full_unlock(tp);
10267 tg3_full_unlock(tp);
10271 tg3_netif_stop(tp);
10273 tg3_full_lock(tp, 1);
10275 if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
10276 tp->write32_tx_mbox = tg3_write32_tx_mbox;
10277 tp->write32_rx_mbox = tg3_write_flush_reg32;
10278 tg3_flag_set(tp, MBOX_WRITE_REORDER);
10279 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
10282 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
10283 err = tg3_init_hw(tp, 1);
10287 tg3_netif_start(tp);
10290 tg3_full_unlock(tp);
10295 tg3_flag_clear(tp, RESET_TASK_PENDING);
10298 static int tg3_request_irq(struct tg3 *tp, int irq_num)
10301 unsigned long flags;
10303 struct tg3_napi *tnapi = &tp->napi[irq_num];
10305 if (tp->irq_cnt == 1)
10306 name = tp->dev->name;
10308 name = &tnapi->irq_lbl[0];
10309 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
10310 name[IFNAMSIZ-1] = 0;
10313 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
10315 if (tg3_flag(tp, 1SHOT_MSI))
10316 fn = tg3_msi_1shot;
10319 fn = tg3_interrupt;
10320 if (tg3_flag(tp, TAGGED_STATUS))
10321 fn = tg3_interrupt_tagged;
10322 flags = IRQF_SHARED;
10325 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
10328 static int tg3_test_interrupt(struct tg3 *tp)
10330 struct tg3_napi *tnapi = &tp->napi[0];
10331 struct net_device *dev = tp->dev;
10332 int err, i, intr_ok = 0;
10335 if (!netif_running(dev))
10338 tg3_disable_ints(tp);
10340 free_irq(tnapi->irq_vec, tnapi);
10343 * Turn off MSI one shot mode. Otherwise this test has no
10344 * observable way to know whether the interrupt was delivered.
10346 if (tg3_flag(tp, 57765_PLUS)) {
10347 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
10348 tw32(MSGINT_MODE, val);
10351 err = request_irq(tnapi->irq_vec, tg3_test_isr,
10352 IRQF_SHARED, dev->name, tnapi);
10356 tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
10357 tg3_enable_ints(tp);
10359 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10362 for (i = 0; i < 5; i++) {
10363 u32 int_mbox, misc_host_ctrl;
10365 int_mbox = tr32_mailbox(tnapi->int_mbox);
10366 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
10368 if ((int_mbox != 0) ||
10369 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
10374 if (tg3_flag(tp, 57765_PLUS) &&
10375 tnapi->hw_status->status_tag != tnapi->last_tag)
10376 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
10381 tg3_disable_ints(tp);
10383 free_irq(tnapi->irq_vec, tnapi);
10385 err = tg3_request_irq(tp, 0);
10391 /* Reenable MSI one shot mode. */
10392 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
10393 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
10394 tw32(MSGINT_MODE, val);
10402 /* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
10403 * successfully restored
10405 static int tg3_test_msi(struct tg3 *tp)
10410 if (!tg3_flag(tp, USING_MSI))
10413 /* Turn off SERR reporting in case MSI terminates with Master
10416 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10417 pci_write_config_word(tp->pdev, PCI_COMMAND,
10418 pci_cmd & ~PCI_COMMAND_SERR);
10420 err = tg3_test_interrupt(tp);
10422 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10427 /* other failures */
10431 /* MSI test failed, go back to INTx mode */
10432 netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
10433 "to INTx mode. Please report this failure to the PCI "
10434 "maintainer and include system chipset information\n");
10436 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
10438 pci_disable_msi(tp->pdev);
10440 tg3_flag_clear(tp, USING_MSI);
10441 tp->napi[0].irq_vec = tp->pdev->irq;
10443 err = tg3_request_irq(tp, 0);
10447 /* Need to reset the chip because the MSI cycle may have terminated
10448 * with Master Abort.
10450 tg3_full_lock(tp, 1);
10452 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10453 err = tg3_init_hw(tp, 1);
10455 tg3_full_unlock(tp);
10458 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
10463 static int tg3_request_firmware(struct tg3 *tp)
10465 const __be32 *fw_data;
10467 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
10468 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
10473 fw_data = (void *)tp->fw->data;
10475 /* Firmware blob starts with version numbers, followed by
10476 * start address and _full_ length including BSS sections
10477 * (which must be longer than the actual data, of course
10480 tp->fw_len = be32_to_cpu(fw_data[2]); /* includes bss */
10481 if (tp->fw_len < (tp->fw->size - 12)) {
10482 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
10483 tp->fw_len, tp->fw_needed);
10484 release_firmware(tp->fw);
10489 /* We no longer need firmware; we have it. */
10490 tp->fw_needed = NULL;
10494 static u32 tg3_irq_count(struct tg3 *tp)
10496 u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
10499 /* We want as many rx rings enabled as there are cpus.
10500 * In multiqueue MSI-X mode, the first MSI-X vector
10501 * only deals with link interrupts, etc, so we add
10502 * one to the number of vectors we are requesting.
10504 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
10510 static bool tg3_enable_msix(struct tg3 *tp)
10513 struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];
10515 tp->txq_cnt = tp->txq_req;
10516 tp->rxq_cnt = tp->rxq_req;
10518 tp->rxq_cnt = netif_get_num_default_rss_queues();
10519 if (tp->rxq_cnt > tp->rxq_max)
10520 tp->rxq_cnt = tp->rxq_max;
10522 /* Disable multiple TX rings by default. Simple round-robin hardware
10523 * scheduling of the TX rings can cause starvation of rings with
10524 * small packets when other rings have TSO or jumbo packets.
10529 tp->irq_cnt = tg3_irq_count(tp);
10531 for (i = 0; i < tp->irq_max; i++) {
10532 msix_ent[i].entry = i;
10533 msix_ent[i].vector = 0;
10536 rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
10539 } else if (rc != 0) {
10540 if (pci_enable_msix(tp->pdev, msix_ent, rc))
10542 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
10545 tp->rxq_cnt = max(rc - 1, 1);
10547 tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
10550 for (i = 0; i < tp->irq_max; i++)
10551 tp->napi[i].irq_vec = msix_ent[i].vector;
10553 if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
10554 pci_disable_msix(tp->pdev);
10558 if (tp->irq_cnt == 1)
10561 tg3_flag_set(tp, ENABLE_RSS);
10563 if (tp->txq_cnt > 1)
10564 tg3_flag_set(tp, ENABLE_TSS);
10566 netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
10571 static void tg3_ints_init(struct tg3 *tp)
10573 if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
10574 !tg3_flag(tp, TAGGED_STATUS)) {
10575 /* All MSI supporting chips should support tagged
10576 * status. Assert that this is the case.
10578 netdev_warn(tp->dev,
10579 "MSI without TAGGED_STATUS? Not using MSI\n");
10583 if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
10584 tg3_flag_set(tp, USING_MSIX);
10585 else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
10586 tg3_flag_set(tp, USING_MSI);
10588 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
10589 u32 msi_mode = tr32(MSGINT_MODE);
10590 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
10591 msi_mode |= MSGINT_MODE_MULTIVEC_EN;
10592 if (!tg3_flag(tp, 1SHOT_MSI))
10593 msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
10594 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
10597 if (!tg3_flag(tp, USING_MSIX)) {
10599 tp->napi[0].irq_vec = tp->pdev->irq;
10602 if (tp->irq_cnt == 1) {
10605 netif_set_real_num_tx_queues(tp->dev, 1);
10606 netif_set_real_num_rx_queues(tp->dev, 1);
10610 static void tg3_ints_fini(struct tg3 *tp)
10612 if (tg3_flag(tp, USING_MSIX))
10613 pci_disable_msix(tp->pdev);
10614 else if (tg3_flag(tp, USING_MSI))
10615 pci_disable_msi(tp->pdev);
10616 tg3_flag_clear(tp, USING_MSI);
10617 tg3_flag_clear(tp, USING_MSIX);
10618 tg3_flag_clear(tp, ENABLE_RSS);
10619 tg3_flag_clear(tp, ENABLE_TSS);
10622 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
10625 struct net_device *dev = tp->dev;
10629 * Setup interrupts first so we know how
10630 * many NAPI resources to allocate
10634 tg3_rss_check_indir_tbl(tp);
10636 /* The placement of this call is tied
10637 * to the setup and use of Host TX descriptors.
10639 err = tg3_alloc_consistent(tp);
10645 tg3_napi_enable(tp);
10647 for (i = 0; i < tp->irq_cnt; i++) {
10648 struct tg3_napi *tnapi = &tp->napi[i];
10649 err = tg3_request_irq(tp, i);
10651 for (i--; i >= 0; i--) {
10652 tnapi = &tp->napi[i];
10653 free_irq(tnapi->irq_vec, tnapi);
10659 tg3_full_lock(tp, 0);
10661 err = tg3_init_hw(tp, reset_phy);
10663 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10664 tg3_free_rings(tp);
10667 tg3_full_unlock(tp);
10672 if (test_irq && tg3_flag(tp, USING_MSI)) {
10673 err = tg3_test_msi(tp);
10676 tg3_full_lock(tp, 0);
10677 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10678 tg3_free_rings(tp);
10679 tg3_full_unlock(tp);
10684 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
10685 u32 val = tr32(PCIE_TRANSACTION_CFG);
10687 tw32(PCIE_TRANSACTION_CFG,
10688 val | PCIE_TRANS_CFG_1SHOT_MSI);
10694 tg3_hwmon_open(tp);
10696 tg3_full_lock(tp, 0);
10698 tg3_timer_start(tp);
10699 tg3_flag_set(tp, INIT_COMPLETE);
10700 tg3_enable_ints(tp);
10705 tg3_ptp_resume(tp);
10708 tg3_full_unlock(tp);
10710 netif_tx_start_all_queues(dev);
10713 * Reset loopback feature if it was turned on while the device was down
10714 * make sure that it's installed properly now.
10716 if (dev->features & NETIF_F_LOOPBACK)
10717 tg3_set_loopback(dev, dev->features);
10722 for (i = tp->irq_cnt - 1; i >= 0; i--) {
10723 struct tg3_napi *tnapi = &tp->napi[i];
10724 free_irq(tnapi->irq_vec, tnapi);
10728 tg3_napi_disable(tp);
10730 tg3_free_consistent(tp);
10738 static void tg3_stop(struct tg3 *tp)
10742 tg3_reset_task_cancel(tp);
10743 tg3_netif_stop(tp);
10745 tg3_timer_stop(tp);
10747 tg3_hwmon_close(tp);
10751 tg3_full_lock(tp, 1);
10753 tg3_disable_ints(tp);
10755 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10756 tg3_free_rings(tp);
10757 tg3_flag_clear(tp, INIT_COMPLETE);
10759 tg3_full_unlock(tp);
10761 for (i = tp->irq_cnt - 1; i >= 0; i--) {
10762 struct tg3_napi *tnapi = &tp->napi[i];
10763 free_irq(tnapi->irq_vec, tnapi);
10770 tg3_free_consistent(tp);
10773 static int tg3_open(struct net_device *dev)
10775 struct tg3 *tp = netdev_priv(dev);
10778 if (tp->fw_needed) {
10779 err = tg3_request_firmware(tp);
10780 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
10784 netdev_warn(tp->dev, "TSO capability disabled\n");
10785 tg3_flag_clear(tp, TSO_CAPABLE);
10786 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
10787 netdev_notice(tp->dev, "TSO capability restored\n");
10788 tg3_flag_set(tp, TSO_CAPABLE);
10792 tg3_carrier_off(tp);
10794 err = tg3_power_up(tp);
10798 tg3_full_lock(tp, 0);
10800 tg3_disable_ints(tp);
10801 tg3_flag_clear(tp, INIT_COMPLETE);
10803 tg3_full_unlock(tp);
10805 err = tg3_start(tp, true, true, true);
10807 tg3_frob_aux_power(tp, false);
10808 pci_set_power_state(tp->pdev, PCI_D3hot);
10811 if (tg3_flag(tp, PTP_CAPABLE)) {
10812 tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
10814 if (IS_ERR(tp->ptp_clock))
10815 tp->ptp_clock = NULL;
10821 static int tg3_close(struct net_device *dev)
10823 struct tg3 *tp = netdev_priv(dev);
10829 /* Clear stats across close / open calls */
10830 memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
10831 memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
10833 tg3_power_down(tp);
10835 tg3_carrier_off(tp);
10840 static inline u64 get_stat64(tg3_stat64_t *val)
10842 return ((u64)val->high << 32) | ((u64)val->low);
10845 static u64 tg3_calc_crc_errors(struct tg3 *tp)
10847 struct tg3_hw_stats *hw_stats = tp->hw_stats;
10849 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10850 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10851 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
10854 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
10855 tg3_writephy(tp, MII_TG3_TEST1,
10856 val | MII_TG3_TEST1_CRC_EN);
10857 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
10861 tp->phy_crc_errors += val;
10863 return tp->phy_crc_errors;
10866 return get_stat64(&hw_stats->rx_fcs_errors);
10869 #define ESTAT_ADD(member) \
10870 estats->member = old_estats->member + \
10871 get_stat64(&hw_stats->member)
10873 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
10875 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
10876 struct tg3_hw_stats *hw_stats = tp->hw_stats;
10878 ESTAT_ADD(rx_octets);
10879 ESTAT_ADD(rx_fragments);
10880 ESTAT_ADD(rx_ucast_packets);
10881 ESTAT_ADD(rx_mcast_packets);
10882 ESTAT_ADD(rx_bcast_packets);
10883 ESTAT_ADD(rx_fcs_errors);
10884 ESTAT_ADD(rx_align_errors);
10885 ESTAT_ADD(rx_xon_pause_rcvd);
10886 ESTAT_ADD(rx_xoff_pause_rcvd);
10887 ESTAT_ADD(rx_mac_ctrl_rcvd);
10888 ESTAT_ADD(rx_xoff_entered);
10889 ESTAT_ADD(rx_frame_too_long_errors);
10890 ESTAT_ADD(rx_jabbers);
10891 ESTAT_ADD(rx_undersize_packets);
10892 ESTAT_ADD(rx_in_length_errors);
10893 ESTAT_ADD(rx_out_length_errors);
10894 ESTAT_ADD(rx_64_or_less_octet_packets);
10895 ESTAT_ADD(rx_65_to_127_octet_packets);
10896 ESTAT_ADD(rx_128_to_255_octet_packets);
10897 ESTAT_ADD(rx_256_to_511_octet_packets);
10898 ESTAT_ADD(rx_512_to_1023_octet_packets);
10899 ESTAT_ADD(rx_1024_to_1522_octet_packets);
10900 ESTAT_ADD(rx_1523_to_2047_octet_packets);
10901 ESTAT_ADD(rx_2048_to_4095_octet_packets);
10902 ESTAT_ADD(rx_4096_to_8191_octet_packets);
10903 ESTAT_ADD(rx_8192_to_9022_octet_packets);
10905 ESTAT_ADD(tx_octets);
10906 ESTAT_ADD(tx_collisions);
10907 ESTAT_ADD(tx_xon_sent);
10908 ESTAT_ADD(tx_xoff_sent);
10909 ESTAT_ADD(tx_flow_control);
10910 ESTAT_ADD(tx_mac_errors);
10911 ESTAT_ADD(tx_single_collisions);
10912 ESTAT_ADD(tx_mult_collisions);
10913 ESTAT_ADD(tx_deferred);
10914 ESTAT_ADD(tx_excessive_collisions);
10915 ESTAT_ADD(tx_late_collisions);
10916 ESTAT_ADD(tx_collide_2times);
10917 ESTAT_ADD(tx_collide_3times);
10918 ESTAT_ADD(tx_collide_4times);
10919 ESTAT_ADD(tx_collide_5times);
10920 ESTAT_ADD(tx_collide_6times);
10921 ESTAT_ADD(tx_collide_7times);
10922 ESTAT_ADD(tx_collide_8times);
10923 ESTAT_ADD(tx_collide_9times);
10924 ESTAT_ADD(tx_collide_10times);
10925 ESTAT_ADD(tx_collide_11times);
10926 ESTAT_ADD(tx_collide_12times);
10927 ESTAT_ADD(tx_collide_13times);
10928 ESTAT_ADD(tx_collide_14times);
10929 ESTAT_ADD(tx_collide_15times);
10930 ESTAT_ADD(tx_ucast_packets);
10931 ESTAT_ADD(tx_mcast_packets);
10932 ESTAT_ADD(tx_bcast_packets);
10933 ESTAT_ADD(tx_carrier_sense_errors);
10934 ESTAT_ADD(tx_discards);
10935 ESTAT_ADD(tx_errors);
10937 ESTAT_ADD(dma_writeq_full);
10938 ESTAT_ADD(dma_write_prioq_full);
10939 ESTAT_ADD(rxbds_empty);
10940 ESTAT_ADD(rx_discards);
10941 ESTAT_ADD(rx_errors);
10942 ESTAT_ADD(rx_threshold_hit);
10944 ESTAT_ADD(dma_readq_full);
10945 ESTAT_ADD(dma_read_prioq_full);
10946 ESTAT_ADD(tx_comp_queue_full);
10948 ESTAT_ADD(ring_set_send_prod_index);
10949 ESTAT_ADD(ring_status_update);
10950 ESTAT_ADD(nic_irqs);
10951 ESTAT_ADD(nic_avoided_irqs);
10952 ESTAT_ADD(nic_tx_threshold_hit);
10954 ESTAT_ADD(mbuf_lwm_thresh_hit);
10957 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
10959 struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
10960 struct tg3_hw_stats *hw_stats = tp->hw_stats;
10962 stats->rx_packets = old_stats->rx_packets +
10963 get_stat64(&hw_stats->rx_ucast_packets) +
10964 get_stat64(&hw_stats->rx_mcast_packets) +
10965 get_stat64(&hw_stats->rx_bcast_packets);
10967 stats->tx_packets = old_stats->tx_packets +
10968 get_stat64(&hw_stats->tx_ucast_packets) +
10969 get_stat64(&hw_stats->tx_mcast_packets) +
10970 get_stat64(&hw_stats->tx_bcast_packets);
10972 stats->rx_bytes = old_stats->rx_bytes +
10973 get_stat64(&hw_stats->rx_octets);
10974 stats->tx_bytes = old_stats->tx_bytes +
10975 get_stat64(&hw_stats->tx_octets);
10977 stats->rx_errors = old_stats->rx_errors +
10978 get_stat64(&hw_stats->rx_errors);
10979 stats->tx_errors = old_stats->tx_errors +
10980 get_stat64(&hw_stats->tx_errors) +
10981 get_stat64(&hw_stats->tx_mac_errors) +
10982 get_stat64(&hw_stats->tx_carrier_sense_errors) +
10983 get_stat64(&hw_stats->tx_discards);
10985 stats->multicast = old_stats->multicast +
10986 get_stat64(&hw_stats->rx_mcast_packets);
10987 stats->collisions = old_stats->collisions +
10988 get_stat64(&hw_stats->tx_collisions);
10990 stats->rx_length_errors = old_stats->rx_length_errors +
10991 get_stat64(&hw_stats->rx_frame_too_long_errors) +
10992 get_stat64(&hw_stats->rx_undersize_packets);
10994 stats->rx_over_errors = old_stats->rx_over_errors +
10995 get_stat64(&hw_stats->rxbds_empty);
10996 stats->rx_frame_errors = old_stats->rx_frame_errors +
10997 get_stat64(&hw_stats->rx_align_errors);
10998 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
10999 get_stat64(&hw_stats->tx_discards);
11000 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
11001 get_stat64(&hw_stats->tx_carrier_sense_errors);
11003 stats->rx_crc_errors = old_stats->rx_crc_errors +
11004 tg3_calc_crc_errors(tp);
11006 stats->rx_missed_errors = old_stats->rx_missed_errors +
11007 get_stat64(&hw_stats->rx_discards);
11009 stats->rx_dropped = tp->rx_dropped;
11010 stats->tx_dropped = tp->tx_dropped;
11013 static int tg3_get_regs_len(struct net_device *dev)
11015 return TG3_REG_BLK_SIZE;
11018 static void tg3_get_regs(struct net_device *dev,
11019 struct ethtool_regs *regs, void *_p)
11021 struct tg3 *tp = netdev_priv(dev);
11025 memset(_p, 0, TG3_REG_BLK_SIZE);
11027 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11030 tg3_full_lock(tp, 0);
11032 tg3_dump_legacy_regs(tp, (u32 *)_p);
11034 tg3_full_unlock(tp);
11037 static int tg3_get_eeprom_len(struct net_device *dev)
11039 struct tg3 *tp = netdev_priv(dev);
11041 return tp->nvram_size;
11044 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11046 struct tg3 *tp = netdev_priv(dev);
11049 u32 i, offset, len, b_offset, b_count;
11052 if (tg3_flag(tp, NO_NVRAM))
11055 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11058 offset = eeprom->offset;
11062 eeprom->magic = TG3_EEPROM_MAGIC;
11065 /* adjustments to start on required 4 byte boundary */
11066 b_offset = offset & 3;
11067 b_count = 4 - b_offset;
11068 if (b_count > len) {
11069 /* i.e. offset=1 len=2 */
11072 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
11075 memcpy(data, ((char *)&val) + b_offset, b_count);
11078 eeprom->len += b_count;
11081 /* read bytes up to the last 4 byte boundary */
11082 pd = &data[eeprom->len];
11083 for (i = 0; i < (len - (len & 3)); i += 4) {
11084 ret = tg3_nvram_read_be32(tp, offset + i, &val);
11089 memcpy(pd + i, &val, 4);
11094 /* read last bytes not ending on 4 byte boundary */
11095 pd = &data[eeprom->len];
11097 b_offset = offset + len - b_count;
11098 ret = tg3_nvram_read_be32(tp, b_offset, &val);
11101 memcpy(pd, &val, b_count);
11102 eeprom->len += b_count;
11107 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11109 struct tg3 *tp = netdev_priv(dev);
11111 u32 offset, len, b_offset, odd_len;
11115 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11118 if (tg3_flag(tp, NO_NVRAM) ||
11119 eeprom->magic != TG3_EEPROM_MAGIC)
11122 offset = eeprom->offset;
11125 if ((b_offset = (offset & 3))) {
11126 /* adjustments to start on required 4 byte boundary */
11127 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
11138 /* adjustments to end on required 4 byte boundary */
11140 len = (len + 3) & ~3;
11141 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
11147 if (b_offset || odd_len) {
11148 buf = kmalloc(len, GFP_KERNEL);
11152 memcpy(buf, &start, 4);
11154 memcpy(buf+len-4, &end, 4);
11155 memcpy(buf + b_offset, data, eeprom->len);
11158 ret = tg3_nvram_write_block(tp, offset, len, buf);
11166 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
11168 struct tg3 *tp = netdev_priv(dev);
11170 if (tg3_flag(tp, USE_PHYLIB)) {
11171 struct phy_device *phydev;
11172 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11174 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11175 return phy_ethtool_gset(phydev, cmd);
11178 cmd->supported = (SUPPORTED_Autoneg);
11180 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
11181 cmd->supported |= (SUPPORTED_1000baseT_Half |
11182 SUPPORTED_1000baseT_Full);
11184 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
11185 cmd->supported |= (SUPPORTED_100baseT_Half |
11186 SUPPORTED_100baseT_Full |
11187 SUPPORTED_10baseT_Half |
11188 SUPPORTED_10baseT_Full |
11190 cmd->port = PORT_TP;
11192 cmd->supported |= SUPPORTED_FIBRE;
11193 cmd->port = PORT_FIBRE;
11196 cmd->advertising = tp->link_config.advertising;
11197 if (tg3_flag(tp, PAUSE_AUTONEG)) {
11198 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
11199 if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
11200 cmd->advertising |= ADVERTISED_Pause;
11202 cmd->advertising |= ADVERTISED_Pause |
11203 ADVERTISED_Asym_Pause;
11205 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
11206 cmd->advertising |= ADVERTISED_Asym_Pause;
11209 if (netif_running(dev) && tp->link_up) {
11210 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
11211 cmd->duplex = tp->link_config.active_duplex;
11212 cmd->lp_advertising = tp->link_config.rmt_adv;
11213 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
11214 if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
11215 cmd->eth_tp_mdix = ETH_TP_MDI_X;
11217 cmd->eth_tp_mdix = ETH_TP_MDI;
11220 ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
11221 cmd->duplex = DUPLEX_UNKNOWN;
11222 cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
11224 cmd->phy_address = tp->phy_addr;
11225 cmd->transceiver = XCVR_INTERNAL;
11226 cmd->autoneg = tp->link_config.autoneg;
11232 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
11234 struct tg3 *tp = netdev_priv(dev);
11235 u32 speed = ethtool_cmd_speed(cmd);
11237 if (tg3_flag(tp, USE_PHYLIB)) {
11238 struct phy_device *phydev;
11239 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11241 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11242 return phy_ethtool_sset(phydev, cmd);
11245 if (cmd->autoneg != AUTONEG_ENABLE &&
11246 cmd->autoneg != AUTONEG_DISABLE)
11249 if (cmd->autoneg == AUTONEG_DISABLE &&
11250 cmd->duplex != DUPLEX_FULL &&
11251 cmd->duplex != DUPLEX_HALF)
11254 if (cmd->autoneg == AUTONEG_ENABLE) {
11255 u32 mask = ADVERTISED_Autoneg |
11257 ADVERTISED_Asym_Pause;
11259 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
11260 mask |= ADVERTISED_1000baseT_Half |
11261 ADVERTISED_1000baseT_Full;
11263 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
11264 mask |= ADVERTISED_100baseT_Half |
11265 ADVERTISED_100baseT_Full |
11266 ADVERTISED_10baseT_Half |
11267 ADVERTISED_10baseT_Full |
11270 mask |= ADVERTISED_FIBRE;
11272 if (cmd->advertising & ~mask)
11275 mask &= (ADVERTISED_1000baseT_Half |
11276 ADVERTISED_1000baseT_Full |
11277 ADVERTISED_100baseT_Half |
11278 ADVERTISED_100baseT_Full |
11279 ADVERTISED_10baseT_Half |
11280 ADVERTISED_10baseT_Full);
11282 cmd->advertising &= mask;
11284 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
11285 if (speed != SPEED_1000)
11288 if (cmd->duplex != DUPLEX_FULL)
11291 if (speed != SPEED_100 &&
11297 tg3_full_lock(tp, 0);
11299 tp->link_config.autoneg = cmd->autoneg;
11300 if (cmd->autoneg == AUTONEG_ENABLE) {
11301 tp->link_config.advertising = (cmd->advertising |
11302 ADVERTISED_Autoneg);
11303 tp->link_config.speed = SPEED_UNKNOWN;
11304 tp->link_config.duplex = DUPLEX_UNKNOWN;
11306 tp->link_config.advertising = 0;
11307 tp->link_config.speed = speed;
11308 tp->link_config.duplex = cmd->duplex;
11311 if (netif_running(dev))
11312 tg3_setup_phy(tp, 1);
11314 tg3_full_unlock(tp);
11319 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
11321 struct tg3 *tp = netdev_priv(dev);
11323 strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
11324 strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
11325 strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
11326 strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
11329 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
11331 struct tg3 *tp = netdev_priv(dev);
11333 if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
11334 wol->supported = WAKE_MAGIC;
11336 wol->supported = 0;
11338 if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
11339 wol->wolopts = WAKE_MAGIC;
11340 memset(&wol->sopass, 0, sizeof(wol->sopass));
11343 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
11345 struct tg3 *tp = netdev_priv(dev);
11346 struct device *dp = &tp->pdev->dev;
11348 if (wol->wolopts & ~WAKE_MAGIC)
11350 if ((wol->wolopts & WAKE_MAGIC) &&
11351 !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
11354 device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
11356 spin_lock_bh(&tp->lock);
11357 if (device_may_wakeup(dp))
11358 tg3_flag_set(tp, WOL_ENABLE);
11360 tg3_flag_clear(tp, WOL_ENABLE);
11361 spin_unlock_bh(&tp->lock);
11366 static u32 tg3_get_msglevel(struct net_device *dev)
11368 struct tg3 *tp = netdev_priv(dev);
11369 return tp->msg_enable;
11372 static void tg3_set_msglevel(struct net_device *dev, u32 value)
11374 struct tg3 *tp = netdev_priv(dev);
11375 tp->msg_enable = value;
11378 static int tg3_nway_reset(struct net_device *dev)
11380 struct tg3 *tp = netdev_priv(dev);
11383 if (!netif_running(dev))
11386 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11389 if (tg3_flag(tp, USE_PHYLIB)) {
11390 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11392 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
11396 spin_lock_bh(&tp->lock);
11398 tg3_readphy(tp, MII_BMCR, &bmcr);
11399 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
11400 ((bmcr & BMCR_ANENABLE) ||
11401 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
11402 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
11406 spin_unlock_bh(&tp->lock);
11412 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
11414 struct tg3 *tp = netdev_priv(dev);
11416 ering->rx_max_pending = tp->rx_std_ring_mask;
11417 if (tg3_flag(tp, JUMBO_RING_ENABLE))
11418 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
11420 ering->rx_jumbo_max_pending = 0;
11422 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
11424 ering->rx_pending = tp->rx_pending;
11425 if (tg3_flag(tp, JUMBO_RING_ENABLE))
11426 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
11428 ering->rx_jumbo_pending = 0;
11430 ering->tx_pending = tp->napi[0].tx_pending;
11433 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
11435 struct tg3 *tp = netdev_priv(dev);
11436 int i, irq_sync = 0, err = 0;
11438 if ((ering->rx_pending > tp->rx_std_ring_mask) ||
11439 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
11440 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
11441 (ering->tx_pending <= MAX_SKB_FRAGS) ||
11442 (tg3_flag(tp, TSO_BUG) &&
11443 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
11446 if (netif_running(dev)) {
11448 tg3_netif_stop(tp);
11452 tg3_full_lock(tp, irq_sync);
11454 tp->rx_pending = ering->rx_pending;
11456 if (tg3_flag(tp, MAX_RXPEND_64) &&
11457 tp->rx_pending > 63)
11458 tp->rx_pending = 63;
11459 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
11461 for (i = 0; i < tp->irq_max; i++)
11462 tp->napi[i].tx_pending = ering->tx_pending;
11464 if (netif_running(dev)) {
11465 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11466 err = tg3_restart_hw(tp, 1);
11468 tg3_netif_start(tp);
11471 tg3_full_unlock(tp);
11473 if (irq_sync && !err)
11479 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
11481 struct tg3 *tp = netdev_priv(dev);
11483 epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
11485 if (tp->link_config.flowctrl & FLOW_CTRL_RX)
11486 epause->rx_pause = 1;
11488 epause->rx_pause = 0;
11490 if (tp->link_config.flowctrl & FLOW_CTRL_TX)
11491 epause->tx_pause = 1;
11493 epause->tx_pause = 0;
11496 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
11498 struct tg3 *tp = netdev_priv(dev);
11501 if (tg3_flag(tp, USE_PHYLIB)) {
11503 struct phy_device *phydev;
11505 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11507 if (!(phydev->supported & SUPPORTED_Pause) ||
11508 (!(phydev->supported & SUPPORTED_Asym_Pause) &&
11509 (epause->rx_pause != epause->tx_pause)))
11512 tp->link_config.flowctrl = 0;
11513 if (epause->rx_pause) {
11514 tp->link_config.flowctrl |= FLOW_CTRL_RX;
11516 if (epause->tx_pause) {
11517 tp->link_config.flowctrl |= FLOW_CTRL_TX;
11518 newadv = ADVERTISED_Pause;
11520 newadv = ADVERTISED_Pause |
11521 ADVERTISED_Asym_Pause;
11522 } else if (epause->tx_pause) {
11523 tp->link_config.flowctrl |= FLOW_CTRL_TX;
11524 newadv = ADVERTISED_Asym_Pause;
11528 if (epause->autoneg)
11529 tg3_flag_set(tp, PAUSE_AUTONEG);
11531 tg3_flag_clear(tp, PAUSE_AUTONEG);
11533 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
11534 u32 oldadv = phydev->advertising &
11535 (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
11536 if (oldadv != newadv) {
11537 phydev->advertising &=
11538 ~(ADVERTISED_Pause |
11539 ADVERTISED_Asym_Pause);
11540 phydev->advertising |= newadv;
11541 if (phydev->autoneg) {
11543 * Always renegotiate the link to
11544 * inform our link partner of our
11545 * flow control settings, even if the
11546 * flow control is forced. Let
11547 * tg3_adjust_link() do the final
11548 * flow control setup.
11550 return phy_start_aneg(phydev);
11554 if (!epause->autoneg)
11555 tg3_setup_flow_control(tp, 0, 0);
11557 tp->link_config.advertising &=
11558 ~(ADVERTISED_Pause |
11559 ADVERTISED_Asym_Pause);
11560 tp->link_config.advertising |= newadv;
11565 if (netif_running(dev)) {
11566 tg3_netif_stop(tp);
11570 tg3_full_lock(tp, irq_sync);
11572 if (epause->autoneg)
11573 tg3_flag_set(tp, PAUSE_AUTONEG);
11575 tg3_flag_clear(tp, PAUSE_AUTONEG);
11576 if (epause->rx_pause)
11577 tp->link_config.flowctrl |= FLOW_CTRL_RX;
11579 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
11580 if (epause->tx_pause)
11581 tp->link_config.flowctrl |= FLOW_CTRL_TX;
11583 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
11585 if (netif_running(dev)) {
11586 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11587 err = tg3_restart_hw(tp, 1);
11589 tg3_netif_start(tp);
11592 tg3_full_unlock(tp);
11598 static int tg3_get_sset_count(struct net_device *dev, int sset)
11602 return TG3_NUM_TEST;
11604 return TG3_NUM_STATS;
11606 return -EOPNOTSUPP;
11610 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
11611 u32 *rules __always_unused)
11613 struct tg3 *tp = netdev_priv(dev);
11615 if (!tg3_flag(tp, SUPPORT_MSIX))
11616 return -EOPNOTSUPP;
11618 switch (info->cmd) {
11619 case ETHTOOL_GRXRINGS:
11620 if (netif_running(tp->dev))
11621 info->data = tp->rxq_cnt;
11623 info->data = num_online_cpus();
11624 if (info->data > TG3_RSS_MAX_NUM_QS)
11625 info->data = TG3_RSS_MAX_NUM_QS;
11628 /* The first interrupt vector only
11629 * handles link interrupts.
11635 return -EOPNOTSUPP;
11639 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
11642 struct tg3 *tp = netdev_priv(dev);
11644 if (tg3_flag(tp, SUPPORT_MSIX))
11645 size = TG3_RSS_INDIR_TBL_SIZE;
11650 static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
11652 struct tg3 *tp = netdev_priv(dev);
11655 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
11656 indir[i] = tp->rss_ind_tbl[i];
11661 static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
11663 struct tg3 *tp = netdev_priv(dev);
11666 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
11667 tp->rss_ind_tbl[i] = indir[i];
11669 if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
11672 /* It is legal to write the indirection
11673 * table while the device is running.
11675 tg3_full_lock(tp, 0);
11676 tg3_rss_write_indir_tbl(tp);
11677 tg3_full_unlock(tp);
11682 static void tg3_get_channels(struct net_device *dev,
11683 struct ethtool_channels *channel)
11685 struct tg3 *tp = netdev_priv(dev);
11686 u32 deflt_qs = netif_get_num_default_rss_queues();
11688 channel->max_rx = tp->rxq_max;
11689 channel->max_tx = tp->txq_max;
11691 if (netif_running(dev)) {
11692 channel->rx_count = tp->rxq_cnt;
11693 channel->tx_count = tp->txq_cnt;
11696 channel->rx_count = tp->rxq_req;
11698 channel->rx_count = min(deflt_qs, tp->rxq_max);
11701 channel->tx_count = tp->txq_req;
11703 channel->tx_count = min(deflt_qs, tp->txq_max);
11707 static int tg3_set_channels(struct net_device *dev,
11708 struct ethtool_channels *channel)
11710 struct tg3 *tp = netdev_priv(dev);
11712 if (!tg3_flag(tp, SUPPORT_MSIX))
11713 return -EOPNOTSUPP;
11715 if (channel->rx_count > tp->rxq_max ||
11716 channel->tx_count > tp->txq_max)
11719 tp->rxq_req = channel->rx_count;
11720 tp->txq_req = channel->tx_count;
11722 if (!netif_running(dev))
11727 tg3_carrier_off(tp);
11729 tg3_start(tp, true, false, false);
11734 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
11736 switch (stringset) {
11738 memcpy(buf, ðtool_stats_keys, sizeof(ethtool_stats_keys));
11741 memcpy(buf, ðtool_test_keys, sizeof(ethtool_test_keys));
11744 WARN_ON(1); /* we need a WARN() */
11749 static int tg3_set_phys_id(struct net_device *dev,
11750 enum ethtool_phys_id_state state)
11752 struct tg3 *tp = netdev_priv(dev);
11754 if (!netif_running(tp->dev))
11758 case ETHTOOL_ID_ACTIVE:
11759 return 1; /* cycle on/off once per second */
11761 case ETHTOOL_ID_ON:
11762 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
11763 LED_CTRL_1000MBPS_ON |
11764 LED_CTRL_100MBPS_ON |
11765 LED_CTRL_10MBPS_ON |
11766 LED_CTRL_TRAFFIC_OVERRIDE |
11767 LED_CTRL_TRAFFIC_BLINK |
11768 LED_CTRL_TRAFFIC_LED);
11771 case ETHTOOL_ID_OFF:
11772 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
11773 LED_CTRL_TRAFFIC_OVERRIDE);
11776 case ETHTOOL_ID_INACTIVE:
11777 tw32(MAC_LED_CTRL, tp->led_ctrl);
11784 static void tg3_get_ethtool_stats(struct net_device *dev,
11785 struct ethtool_stats *estats, u64 *tmp_stats)
11787 struct tg3 *tp = netdev_priv(dev);
11790 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
11792 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
11795 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
11799 u32 offset = 0, len = 0;
11802 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
11805 if (magic == TG3_EEPROM_MAGIC) {
11806 for (offset = TG3_NVM_DIR_START;
11807 offset < TG3_NVM_DIR_END;
11808 offset += TG3_NVM_DIRENT_SIZE) {
11809 if (tg3_nvram_read(tp, offset, &val))
11812 if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
11813 TG3_NVM_DIRTYPE_EXTVPD)
11817 if (offset != TG3_NVM_DIR_END) {
11818 len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
11819 if (tg3_nvram_read(tp, offset + 4, &offset))
11822 offset = tg3_nvram_logical_addr(tp, offset);
11826 if (!offset || !len) {
11827 offset = TG3_NVM_VPD_OFF;
11828 len = TG3_NVM_VPD_LEN;
11831 buf = kmalloc(len, GFP_KERNEL);
11835 if (magic == TG3_EEPROM_MAGIC) {
11836 for (i = 0; i < len; i += 4) {
11837 /* The data is in little-endian format in NVRAM.
11838 * Use the big-endian read routines to preserve
11839 * the byte order as it exists in NVRAM.
11841 if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
11847 unsigned int pos = 0;
11849 ptr = (u8 *)&buf[0];
11850 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
11851 cnt = pci_read_vpd(tp->pdev, pos,
11853 if (cnt == -ETIMEDOUT || cnt == -EINTR)
11871 #define NVRAM_TEST_SIZE 0x100
11872 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
11873 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
11874 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
11875 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE 0x20
11876 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE 0x24
11877 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE 0x50
11878 #define NVRAM_SELFBOOT_HW_SIZE 0x20
11879 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
11881 static int tg3_test_nvram(struct tg3 *tp)
11883 u32 csum, magic, len;
11885 int i, j, k, err = 0, size;
11887 if (tg3_flag(tp, NO_NVRAM))
11890 if (tg3_nvram_read(tp, 0, &magic) != 0)
11893 if (magic == TG3_EEPROM_MAGIC)
11894 size = NVRAM_TEST_SIZE;
11895 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
11896 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
11897 TG3_EEPROM_SB_FORMAT_1) {
11898 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
11899 case TG3_EEPROM_SB_REVISION_0:
11900 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
11902 case TG3_EEPROM_SB_REVISION_2:
11903 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
11905 case TG3_EEPROM_SB_REVISION_3:
11906 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
11908 case TG3_EEPROM_SB_REVISION_4:
11909 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
11911 case TG3_EEPROM_SB_REVISION_5:
11912 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
11914 case TG3_EEPROM_SB_REVISION_6:
11915 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
11922 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
11923 size = NVRAM_SELFBOOT_HW_SIZE;
11927 buf = kmalloc(size, GFP_KERNEL);
11932 for (i = 0, j = 0; i < size; i += 4, j++) {
11933 err = tg3_nvram_read_be32(tp, i, &buf[j]);
11940 /* Selfboot format */
11941 magic = be32_to_cpu(buf[0]);
11942 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
11943 TG3_EEPROM_MAGIC_FW) {
11944 u8 *buf8 = (u8 *) buf, csum8 = 0;
11946 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
11947 TG3_EEPROM_SB_REVISION_2) {
11948 /* For rev 2, the csum doesn't include the MBA. */
11949 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
11951 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
11954 for (i = 0; i < size; i++)
11967 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
11968 TG3_EEPROM_MAGIC_HW) {
11969 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
11970 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
11971 u8 *buf8 = (u8 *) buf;
11973 /* Separate the parity bits and the data bytes. */
11974 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
11975 if ((i == 0) || (i == 8)) {
11979 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
11980 parity[k++] = buf8[i] & msk;
11982 } else if (i == 16) {
11986 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
11987 parity[k++] = buf8[i] & msk;
11990 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
11991 parity[k++] = buf8[i] & msk;
11994 data[j++] = buf8[i];
11998 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
11999 u8 hw8 = hweight8(data[i]);
12001 if ((hw8 & 0x1) && parity[i])
12003 else if (!(hw8 & 0x1) && !parity[i])
12012 /* Bootstrap checksum at offset 0x10 */
12013 csum = calc_crc((unsigned char *) buf, 0x10);
12014 if (csum != le32_to_cpu(buf[0x10/4]))
12017 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
12018 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
12019 if (csum != le32_to_cpu(buf[0xfc/4]))
12024 buf = tg3_vpd_readblock(tp, &len);
12028 i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
12030 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
12034 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
12037 i += PCI_VPD_LRDT_TAG_SIZE;
12038 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
12039 PCI_VPD_RO_KEYWORD_CHKSUM);
12043 j += PCI_VPD_INFO_FLD_HDR_SIZE;
12045 for (i = 0; i <= j; i++)
12046 csum8 += ((u8 *)buf)[i];
12060 #define TG3_SERDES_TIMEOUT_SEC 2
12061 #define TG3_COPPER_TIMEOUT_SEC 6
12063 static int tg3_test_link(struct tg3 *tp)
12067 if (!netif_running(tp->dev))
12070 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
12071 max = TG3_SERDES_TIMEOUT_SEC;
12073 max = TG3_COPPER_TIMEOUT_SEC;
12075 for (i = 0; i < max; i++) {
12079 if (msleep_interruptible(1000))
12086 /* Only test the commonly used registers */
12087 static int tg3_test_registers(struct tg3 *tp)
12089 int i, is_5705, is_5750;
12090 u32 offset, read_mask, write_mask, val, save_val, read_val;
12094 #define TG3_FL_5705 0x1
12095 #define TG3_FL_NOT_5705 0x2
12096 #define TG3_FL_NOT_5788 0x4
12097 #define TG3_FL_NOT_5750 0x8
12101 /* MAC Control Registers */
12102 { MAC_MODE, TG3_FL_NOT_5705,
12103 0x00000000, 0x00ef6f8c },
12104 { MAC_MODE, TG3_FL_5705,
12105 0x00000000, 0x01ef6b8c },
12106 { MAC_STATUS, TG3_FL_NOT_5705,
12107 0x03800107, 0x00000000 },
12108 { MAC_STATUS, TG3_FL_5705,
12109 0x03800100, 0x00000000 },
12110 { MAC_ADDR_0_HIGH, 0x0000,
12111 0x00000000, 0x0000ffff },
12112 { MAC_ADDR_0_LOW, 0x0000,
12113 0x00000000, 0xffffffff },
12114 { MAC_RX_MTU_SIZE, 0x0000,
12115 0x00000000, 0x0000ffff },
12116 { MAC_TX_MODE, 0x0000,
12117 0x00000000, 0x00000070 },
12118 { MAC_TX_LENGTHS, 0x0000,
12119 0x00000000, 0x00003fff },
12120 { MAC_RX_MODE, TG3_FL_NOT_5705,
12121 0x00000000, 0x000007fc },
12122 { MAC_RX_MODE, TG3_FL_5705,
12123 0x00000000, 0x000007dc },
12124 { MAC_HASH_REG_0, 0x0000,
12125 0x00000000, 0xffffffff },
12126 { MAC_HASH_REG_1, 0x0000,
12127 0x00000000, 0xffffffff },
12128 { MAC_HASH_REG_2, 0x0000,
12129 0x00000000, 0xffffffff },
12130 { MAC_HASH_REG_3, 0x0000,
12131 0x00000000, 0xffffffff },
12133 /* Receive Data and Receive BD Initiator Control Registers. */
12134 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
12135 0x00000000, 0xffffffff },
12136 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
12137 0x00000000, 0xffffffff },
12138 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
12139 0x00000000, 0x00000003 },
12140 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
12141 0x00000000, 0xffffffff },
12142 { RCVDBDI_STD_BD+0, 0x0000,
12143 0x00000000, 0xffffffff },
12144 { RCVDBDI_STD_BD+4, 0x0000,
12145 0x00000000, 0xffffffff },
12146 { RCVDBDI_STD_BD+8, 0x0000,
12147 0x00000000, 0xffff0002 },
12148 { RCVDBDI_STD_BD+0xc, 0x0000,
12149 0x00000000, 0xffffffff },
12151 /* Receive BD Initiator Control Registers. */
12152 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
12153 0x00000000, 0xffffffff },
12154 { RCVBDI_STD_THRESH, TG3_FL_5705,
12155 0x00000000, 0x000003ff },
12156 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
12157 0x00000000, 0xffffffff },
12159 /* Host Coalescing Control Registers. */
12160 { HOSTCC_MODE, TG3_FL_NOT_5705,
12161 0x00000000, 0x00000004 },
12162 { HOSTCC_MODE, TG3_FL_5705,
12163 0x00000000, 0x000000f6 },
12164 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
12165 0x00000000, 0xffffffff },
12166 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
12167 0x00000000, 0x000003ff },
12168 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
12169 0x00000000, 0xffffffff },
12170 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
12171 0x00000000, 0x000003ff },
12172 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
12173 0x00000000, 0xffffffff },
12174 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
12175 0x00000000, 0x000000ff },
12176 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
12177 0x00000000, 0xffffffff },
12178 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
12179 0x00000000, 0x000000ff },
12180 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
12181 0x00000000, 0xffffffff },
12182 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
12183 0x00000000, 0xffffffff },
12184 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
12185 0x00000000, 0xffffffff },
12186 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
12187 0x00000000, 0x000000ff },
12188 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
12189 0x00000000, 0xffffffff },
12190 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
12191 0x00000000, 0x000000ff },
12192 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
12193 0x00000000, 0xffffffff },
12194 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
12195 0x00000000, 0xffffffff },
12196 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
12197 0x00000000, 0xffffffff },
12198 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
12199 0x00000000, 0xffffffff },
12200 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
12201 0x00000000, 0xffffffff },
12202 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
12203 0xffffffff, 0x00000000 },
12204 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
12205 0xffffffff, 0x00000000 },
12207 /* Buffer Manager Control Registers. */
12208 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
12209 0x00000000, 0x007fff80 },
12210 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
12211 0x00000000, 0x007fffff },
12212 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
12213 0x00000000, 0x0000003f },
12214 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
12215 0x00000000, 0x000001ff },
12216 { BUFMGR_MB_HIGH_WATER, 0x0000,
12217 0x00000000, 0x000001ff },
12218 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
12219 0xffffffff, 0x00000000 },
12220 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
12221 0xffffffff, 0x00000000 },
12223 /* Mailbox Registers */
12224 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
12225 0x00000000, 0x000001ff },
12226 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
12227 0x00000000, 0x000001ff },
12228 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
12229 0x00000000, 0x000007ff },
12230 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
12231 0x00000000, 0x000001ff },
12233 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
12236 is_5705 = is_5750 = 0;
12237 if (tg3_flag(tp, 5705_PLUS)) {
12239 if (tg3_flag(tp, 5750_PLUS))
12243 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
12244 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
12247 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
12250 if (tg3_flag(tp, IS_5788) &&
12251 (reg_tbl[i].flags & TG3_FL_NOT_5788))
12254 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
12257 offset = (u32) reg_tbl[i].offset;
12258 read_mask = reg_tbl[i].read_mask;
12259 write_mask = reg_tbl[i].write_mask;
12261 /* Save the original register content */
12262 save_val = tr32(offset);
12264 /* Determine the read-only value. */
12265 read_val = save_val & read_mask;
12267 /* Write zero to the register, then make sure the read-only bits
12268 * are not changed and the read/write bits are all zeros.
12272 val = tr32(offset);
12274 /* Test the read-only and read/write bits. */
12275 if (((val & read_mask) != read_val) || (val & write_mask))
12278 /* Write ones to all the bits defined by RdMask and WrMask, then
12279 * make sure the read-only bits are not changed and the
12280 * read/write bits are all ones.
12282 tw32(offset, read_mask | write_mask);
12284 val = tr32(offset);
12286 /* Test the read-only bits. */
12287 if ((val & read_mask) != read_val)
12290 /* Test the read/write bits. */
12291 if ((val & write_mask) != write_mask)
12294 tw32(offset, save_val);
12300 if (netif_msg_hw(tp))
12301 netdev_err(tp->dev,
12302 "Register test failed at offset %x\n", offset);
12303 tw32(offset, save_val);
12307 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
12309 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
12313 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
12314 for (j = 0; j < len; j += 4) {
12317 tg3_write_mem(tp, offset + j, test_pattern[i]);
12318 tg3_read_mem(tp, offset + j, &val);
12319 if (val != test_pattern[i])
12326 static int tg3_test_memory(struct tg3 *tp)
12328 static struct mem_entry {
12331 } mem_tbl_570x[] = {
12332 { 0x00000000, 0x00b50},
12333 { 0x00002000, 0x1c000},
12334 { 0xffffffff, 0x00000}
12335 }, mem_tbl_5705[] = {
12336 { 0x00000100, 0x0000c},
12337 { 0x00000200, 0x00008},
12338 { 0x00004000, 0x00800},
12339 { 0x00006000, 0x01000},
12340 { 0x00008000, 0x02000},
12341 { 0x00010000, 0x0e000},
12342 { 0xffffffff, 0x00000}
12343 }, mem_tbl_5755[] = {
12344 { 0x00000200, 0x00008},
12345 { 0x00004000, 0x00800},
12346 { 0x00006000, 0x00800},
12347 { 0x00008000, 0x02000},
12348 { 0x00010000, 0x0c000},
12349 { 0xffffffff, 0x00000}
12350 }, mem_tbl_5906[] = {
12351 { 0x00000200, 0x00008},
12352 { 0x00004000, 0x00400},
12353 { 0x00006000, 0x00400},
12354 { 0x00008000, 0x01000},
12355 { 0x00010000, 0x01000},
12356 { 0xffffffff, 0x00000}
12357 }, mem_tbl_5717[] = {
12358 { 0x00000200, 0x00008},
12359 { 0x00010000, 0x0a000},
12360 { 0x00020000, 0x13c00},
12361 { 0xffffffff, 0x00000}
12362 }, mem_tbl_57765[] = {
12363 { 0x00000200, 0x00008},
12364 { 0x00004000, 0x00800},
12365 { 0x00006000, 0x09800},
12366 { 0x00010000, 0x0a000},
12367 { 0xffffffff, 0x00000}
12369 struct mem_entry *mem_tbl;
12373 if (tg3_flag(tp, 5717_PLUS))
12374 mem_tbl = mem_tbl_5717;
12375 else if (tg3_flag(tp, 57765_CLASS) ||
12376 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
12377 mem_tbl = mem_tbl_57765;
12378 else if (tg3_flag(tp, 5755_PLUS))
12379 mem_tbl = mem_tbl_5755;
12380 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12381 mem_tbl = mem_tbl_5906;
12382 else if (tg3_flag(tp, 5705_PLUS))
12383 mem_tbl = mem_tbl_5705;
12385 mem_tbl = mem_tbl_570x;
12387 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
12388 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
12396 #define TG3_TSO_MSS 500
12398 #define TG3_TSO_IP_HDR_LEN 20
12399 #define TG3_TSO_TCP_HDR_LEN 20
12400 #define TG3_TSO_TCP_OPT_LEN 12
12402 static const u8 tg3_tso_header[] = {
12404 0x45, 0x00, 0x00, 0x00,
12405 0x00, 0x00, 0x40, 0x00,
12406 0x40, 0x06, 0x00, 0x00,
12407 0x0a, 0x00, 0x00, 0x01,
12408 0x0a, 0x00, 0x00, 0x02,
12409 0x0d, 0x00, 0xe0, 0x00,
12410 0x00, 0x00, 0x01, 0x00,
12411 0x00, 0x00, 0x02, 0x00,
12412 0x80, 0x10, 0x10, 0x00,
12413 0x14, 0x09, 0x00, 0x00,
12414 0x01, 0x01, 0x08, 0x0a,
12415 0x11, 0x11, 0x11, 0x11,
12416 0x11, 0x11, 0x11, 0x11,
12419 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
12421 u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
12422 u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
12424 struct sk_buff *skb;
12425 u8 *tx_data, *rx_data;
12427 int num_pkts, tx_len, rx_len, i, err;
12428 struct tg3_rx_buffer_desc *desc;
12429 struct tg3_napi *tnapi, *rnapi;
12430 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
12432 tnapi = &tp->napi[0];
12433 rnapi = &tp->napi[0];
12434 if (tp->irq_cnt > 1) {
12435 if (tg3_flag(tp, ENABLE_RSS))
12436 rnapi = &tp->napi[1];
12437 if (tg3_flag(tp, ENABLE_TSS))
12438 tnapi = &tp->napi[1];
12440 coal_now = tnapi->coal_now | rnapi->coal_now;
12445 skb = netdev_alloc_skb(tp->dev, tx_len);
12449 tx_data = skb_put(skb, tx_len);
12450 memcpy(tx_data, tp->dev->dev_addr, 6);
12451 memset(tx_data + 6, 0x0, 8);
12453 tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
12455 if (tso_loopback) {
12456 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
12458 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
12459 TG3_TSO_TCP_OPT_LEN;
12461 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
12462 sizeof(tg3_tso_header));
12465 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
12466 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
12468 /* Set the total length field in the IP header */
12469 iph->tot_len = htons((u16)(mss + hdr_len));
12471 base_flags = (TXD_FLAG_CPU_PRE_DMA |
12472 TXD_FLAG_CPU_POST_DMA);
12474 if (tg3_flag(tp, HW_TSO_1) ||
12475 tg3_flag(tp, HW_TSO_2) ||
12476 tg3_flag(tp, HW_TSO_3)) {
12478 val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
12479 th = (struct tcphdr *)&tx_data[val];
12482 base_flags |= TXD_FLAG_TCPUDP_CSUM;
12484 if (tg3_flag(tp, HW_TSO_3)) {
12485 mss |= (hdr_len & 0xc) << 12;
12486 if (hdr_len & 0x10)
12487 base_flags |= 0x00000010;
12488 base_flags |= (hdr_len & 0x3e0) << 5;
12489 } else if (tg3_flag(tp, HW_TSO_2))
12490 mss |= hdr_len << 9;
12491 else if (tg3_flag(tp, HW_TSO_1) ||
12492 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
12493 mss |= (TG3_TSO_TCP_OPT_LEN << 9);
12495 base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
12498 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
12501 data_off = ETH_HLEN;
12503 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
12504 tx_len > VLAN_ETH_FRAME_LEN)
12505 base_flags |= TXD_FLAG_JMB_PKT;
12508 for (i = data_off; i < tx_len; i++)
12509 tx_data[i] = (u8) (i & 0xff);
12511 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
12512 if (pci_dma_mapping_error(tp->pdev, map)) {
12513 dev_kfree_skb(skb);
12517 val = tnapi->tx_prod;
12518 tnapi->tx_buffers[val].skb = skb;
12519 dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
12521 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
12526 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
12528 budget = tg3_tx_avail(tnapi);
12529 if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
12530 base_flags | TXD_FLAG_END, mss, 0)) {
12531 tnapi->tx_buffers[val].skb = NULL;
12532 dev_kfree_skb(skb);
12538 /* Sync BD data before updating mailbox */
12541 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
12542 tr32_mailbox(tnapi->prodmbox);
12546 /* 350 usec to allow enough time on some 10/100 Mbps devices. */
12547 for (i = 0; i < 35; i++) {
12548 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
12553 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
12554 rx_idx = rnapi->hw_status->idx[0].rx_producer;
12555 if ((tx_idx == tnapi->tx_prod) &&
12556 (rx_idx == (rx_start_idx + num_pkts)))
12560 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
12561 dev_kfree_skb(skb);
12563 if (tx_idx != tnapi->tx_prod)
12566 if (rx_idx != rx_start_idx + num_pkts)
12570 while (rx_idx != rx_start_idx) {
12571 desc = &rnapi->rx_rcb[rx_start_idx++];
12572 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
12573 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
12575 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
12576 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
12579 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
12582 if (!tso_loopback) {
12583 if (rx_len != tx_len)
12586 if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
12587 if (opaque_key != RXD_OPAQUE_RING_STD)
12590 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
12593 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
12594 (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
12595 >> RXD_TCPCSUM_SHIFT != 0xffff) {
12599 if (opaque_key == RXD_OPAQUE_RING_STD) {
12600 rx_data = tpr->rx_std_buffers[desc_idx].data;
12601 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
12603 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
12604 rx_data = tpr->rx_jmb_buffers[desc_idx].data;
12605 map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
12610 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
12611 PCI_DMA_FROMDEVICE);
12613 rx_data += TG3_RX_OFFSET(tp);
12614 for (i = data_off; i < rx_len; i++, val++) {
12615 if (*(rx_data + i) != (u8) (val & 0xff))
12622 /* tg3_free_rings will unmap and free the rx_data */
12627 #define TG3_STD_LOOPBACK_FAILED 1
12628 #define TG3_JMB_LOOPBACK_FAILED 2
12629 #define TG3_TSO_LOOPBACK_FAILED 4
12630 #define TG3_LOOPBACK_FAILED \
12631 (TG3_STD_LOOPBACK_FAILED | \
12632 TG3_JMB_LOOPBACK_FAILED | \
12633 TG3_TSO_LOOPBACK_FAILED)
12635 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
12639 u32 jmb_pkt_sz = 9000;
12642 jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
12644 eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
12645 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
12647 if (!netif_running(tp->dev)) {
12648 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12649 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12651 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12655 err = tg3_reset_hw(tp, 1);
12657 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12658 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12660 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12664 if (tg3_flag(tp, ENABLE_RSS)) {
12667 /* Reroute all rx packets to the 1st queue */
12668 for (i = MAC_RSS_INDIR_TBL_0;
12669 i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
12673 /* HW errata - mac loopback fails in some cases on 5780.
12674 * Normal traffic and PHY loopback are not affected by
12675 * errata. Also, the MAC loopback test is deprecated for
12676 * all newer ASIC revisions.
12678 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
12679 !tg3_flag(tp, CPMU_PRESENT)) {
12680 tg3_mac_loopback(tp, true);
12682 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
12683 data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
12685 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
12686 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
12687 data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
12689 tg3_mac_loopback(tp, false);
12692 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
12693 !tg3_flag(tp, USE_PHYLIB)) {
12696 tg3_phy_lpbk_set(tp, 0, false);
12698 /* Wait for link */
12699 for (i = 0; i < 100; i++) {
12700 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
12705 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
12706 data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
12707 if (tg3_flag(tp, TSO_CAPABLE) &&
12708 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
12709 data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
12710 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
12711 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
12712 data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
12715 tg3_phy_lpbk_set(tp, 0, true);
12717 /* All link indications report up, but the hardware
12718 * isn't really ready for about 20 msec. Double it
12723 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
12724 data[TG3_EXT_LOOPB_TEST] |=
12725 TG3_STD_LOOPBACK_FAILED;
12726 if (tg3_flag(tp, TSO_CAPABLE) &&
12727 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
12728 data[TG3_EXT_LOOPB_TEST] |=
12729 TG3_TSO_LOOPBACK_FAILED;
12730 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
12731 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
12732 data[TG3_EXT_LOOPB_TEST] |=
12733 TG3_JMB_LOOPBACK_FAILED;
12736 /* Re-enable gphy autopowerdown. */
12737 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
12738 tg3_phy_toggle_apd(tp, true);
12741 err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
12742 data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
12745 tp->phy_flags |= eee_cap;
12750 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
12753 struct tg3 *tp = netdev_priv(dev);
12754 bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
12756 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
12757 tg3_power_up(tp)) {
12758 etest->flags |= ETH_TEST_FL_FAILED;
12759 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
12763 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
12765 if (tg3_test_nvram(tp) != 0) {
12766 etest->flags |= ETH_TEST_FL_FAILED;
12767 data[TG3_NVRAM_TEST] = 1;
12769 if (!doextlpbk && tg3_test_link(tp)) {
12770 etest->flags |= ETH_TEST_FL_FAILED;
12771 data[TG3_LINK_TEST] = 1;
12773 if (etest->flags & ETH_TEST_FL_OFFLINE) {
12774 int err, err2 = 0, irq_sync = 0;
12776 if (netif_running(dev)) {
12778 tg3_netif_stop(tp);
12782 tg3_full_lock(tp, irq_sync);
12783 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
12784 err = tg3_nvram_lock(tp);
12785 tg3_halt_cpu(tp, RX_CPU_BASE);
12786 if (!tg3_flag(tp, 5705_PLUS))
12787 tg3_halt_cpu(tp, TX_CPU_BASE);
12789 tg3_nvram_unlock(tp);
12791 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
12794 if (tg3_test_registers(tp) != 0) {
12795 etest->flags |= ETH_TEST_FL_FAILED;
12796 data[TG3_REGISTER_TEST] = 1;
12799 if (tg3_test_memory(tp) != 0) {
12800 etest->flags |= ETH_TEST_FL_FAILED;
12801 data[TG3_MEMORY_TEST] = 1;
12805 etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
12807 if (tg3_test_loopback(tp, data, doextlpbk))
12808 etest->flags |= ETH_TEST_FL_FAILED;
12810 tg3_full_unlock(tp);
12812 if (tg3_test_interrupt(tp) != 0) {
12813 etest->flags |= ETH_TEST_FL_FAILED;
12814 data[TG3_INTERRUPT_TEST] = 1;
12817 tg3_full_lock(tp, 0);
12819 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12820 if (netif_running(dev)) {
12821 tg3_flag_set(tp, INIT_COMPLETE);
12822 err2 = tg3_restart_hw(tp, 1);
12824 tg3_netif_start(tp);
12827 tg3_full_unlock(tp);
12829 if (irq_sync && !err2)
12832 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
12833 tg3_power_down(tp);
12837 static int tg3_hwtstamp_ioctl(struct net_device *dev,
12838 struct ifreq *ifr, int cmd)
12840 struct tg3 *tp = netdev_priv(dev);
12841 struct hwtstamp_config stmpconf;
12843 if (!tg3_flag(tp, PTP_CAPABLE))
12846 if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
12849 if (stmpconf.flags)
12852 switch (stmpconf.tx_type) {
12853 case HWTSTAMP_TX_ON:
12854 tg3_flag_set(tp, TX_TSTAMP_EN);
12856 case HWTSTAMP_TX_OFF:
12857 tg3_flag_clear(tp, TX_TSTAMP_EN);
12863 switch (stmpconf.rx_filter) {
12864 case HWTSTAMP_FILTER_NONE:
12867 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
12868 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
12869 TG3_RX_PTP_CTL_ALL_V1_EVENTS;
12871 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
12872 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
12873 TG3_RX_PTP_CTL_SYNC_EVNT;
12875 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
12876 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
12877 TG3_RX_PTP_CTL_DELAY_REQ;
12879 case HWTSTAMP_FILTER_PTP_V2_EVENT:
12880 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
12881 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
12883 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
12884 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
12885 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
12887 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
12888 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
12889 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
12891 case HWTSTAMP_FILTER_PTP_V2_SYNC:
12892 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
12893 TG3_RX_PTP_CTL_SYNC_EVNT;
12895 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
12896 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
12897 TG3_RX_PTP_CTL_SYNC_EVNT;
12899 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
12900 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
12901 TG3_RX_PTP_CTL_SYNC_EVNT;
12903 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
12904 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
12905 TG3_RX_PTP_CTL_DELAY_REQ;
12907 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
12908 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
12909 TG3_RX_PTP_CTL_DELAY_REQ;
12911 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
12912 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
12913 TG3_RX_PTP_CTL_DELAY_REQ;
12919 if (netif_running(dev) && tp->rxptpctl)
12920 tw32(TG3_RX_PTP_CTL,
12921 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
12923 return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
12927 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
12929 struct mii_ioctl_data *data = if_mii(ifr);
12930 struct tg3 *tp = netdev_priv(dev);
12933 if (tg3_flag(tp, USE_PHYLIB)) {
12934 struct phy_device *phydev;
12935 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12937 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
12938 return phy_mii_ioctl(phydev, ifr, cmd);
12943 data->phy_id = tp->phy_addr;
12946 case SIOCGMIIREG: {
12949 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12950 break; /* We have no PHY */
12952 if (!netif_running(dev))
12955 spin_lock_bh(&tp->lock);
12956 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
12957 spin_unlock_bh(&tp->lock);
12959 data->val_out = mii_regval;
12965 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12966 break; /* We have no PHY */
12968 if (!netif_running(dev))
12971 spin_lock_bh(&tp->lock);
12972 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
12973 spin_unlock_bh(&tp->lock);
12977 case SIOCSHWTSTAMP:
12978 return tg3_hwtstamp_ioctl(dev, ifr, cmd);
12984 return -EOPNOTSUPP;
12987 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
12989 struct tg3 *tp = netdev_priv(dev);
12991 memcpy(ec, &tp->coal, sizeof(*ec));
12995 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
12997 struct tg3 *tp = netdev_priv(dev);
12998 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
12999 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
13001 if (!tg3_flag(tp, 5705_PLUS)) {
13002 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
13003 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
13004 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
13005 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
13008 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
13009 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
13010 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
13011 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
13012 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
13013 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
13014 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
13015 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
13016 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
13017 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
13020 /* No rx interrupts will be generated if both are zero */
13021 if ((ec->rx_coalesce_usecs == 0) &&
13022 (ec->rx_max_coalesced_frames == 0))
13025 /* No tx interrupts will be generated if both are zero */
13026 if ((ec->tx_coalesce_usecs == 0) &&
13027 (ec->tx_max_coalesced_frames == 0))
13030 /* Only copy relevant parameters, ignore all others. */
13031 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
13032 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
13033 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
13034 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
13035 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
13036 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
13037 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
13038 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
13039 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
13041 if (netif_running(dev)) {
13042 tg3_full_lock(tp, 0);
13043 __tg3_set_coalesce(tp, &tp->coal);
13044 tg3_full_unlock(tp);
13049 static const struct ethtool_ops tg3_ethtool_ops = {
13050 .get_settings = tg3_get_settings,
13051 .set_settings = tg3_set_settings,
13052 .get_drvinfo = tg3_get_drvinfo,
13053 .get_regs_len = tg3_get_regs_len,
13054 .get_regs = tg3_get_regs,
13055 .get_wol = tg3_get_wol,
13056 .set_wol = tg3_set_wol,
13057 .get_msglevel = tg3_get_msglevel,
13058 .set_msglevel = tg3_set_msglevel,
13059 .nway_reset = tg3_nway_reset,
13060 .get_link = ethtool_op_get_link,
13061 .get_eeprom_len = tg3_get_eeprom_len,
13062 .get_eeprom = tg3_get_eeprom,
13063 .set_eeprom = tg3_set_eeprom,
13064 .get_ringparam = tg3_get_ringparam,
13065 .set_ringparam = tg3_set_ringparam,
13066 .get_pauseparam = tg3_get_pauseparam,
13067 .set_pauseparam = tg3_set_pauseparam,
13068 .self_test = tg3_self_test,
13069 .get_strings = tg3_get_strings,
13070 .set_phys_id = tg3_set_phys_id,
13071 .get_ethtool_stats = tg3_get_ethtool_stats,
13072 .get_coalesce = tg3_get_coalesce,
13073 .set_coalesce = tg3_set_coalesce,
13074 .get_sset_count = tg3_get_sset_count,
13075 .get_rxnfc = tg3_get_rxnfc,
13076 .get_rxfh_indir_size = tg3_get_rxfh_indir_size,
13077 .get_rxfh_indir = tg3_get_rxfh_indir,
13078 .set_rxfh_indir = tg3_set_rxfh_indir,
13079 .get_channels = tg3_get_channels,
13080 .set_channels = tg3_set_channels,
13081 .get_ts_info = tg3_get_ts_info,
13084 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
13085 struct rtnl_link_stats64 *stats)
13087 struct tg3 *tp = netdev_priv(dev);
13089 spin_lock_bh(&tp->lock);
13090 if (!tp->hw_stats) {
13091 spin_unlock_bh(&tp->lock);
13092 return &tp->net_stats_prev;
13095 tg3_get_nstats(tp, stats);
13096 spin_unlock_bh(&tp->lock);
13101 static void tg3_set_rx_mode(struct net_device *dev)
13103 struct tg3 *tp = netdev_priv(dev);
13105 if (!netif_running(dev))
13108 tg3_full_lock(tp, 0);
13109 __tg3_set_rx_mode(dev);
13110 tg3_full_unlock(tp);
13113 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
13116 dev->mtu = new_mtu;
13118 if (new_mtu > ETH_DATA_LEN) {
13119 if (tg3_flag(tp, 5780_CLASS)) {
13120 netdev_update_features(dev);
13121 tg3_flag_clear(tp, TSO_CAPABLE);
13123 tg3_flag_set(tp, JUMBO_RING_ENABLE);
13126 if (tg3_flag(tp, 5780_CLASS)) {
13127 tg3_flag_set(tp, TSO_CAPABLE);
13128 netdev_update_features(dev);
13130 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
13134 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
13136 struct tg3 *tp = netdev_priv(dev);
13137 int err, reset_phy = 0;
13139 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
13142 if (!netif_running(dev)) {
13143 /* We'll just catch it later when the
13146 tg3_set_mtu(dev, tp, new_mtu);
13152 tg3_netif_stop(tp);
13154 tg3_full_lock(tp, 1);
13156 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13158 tg3_set_mtu(dev, tp, new_mtu);
13160 /* Reset PHY, otherwise the read DMA engine will be in a mode that
13161 * breaks all requests to 256 bytes.
13163 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
13166 err = tg3_restart_hw(tp, reset_phy);
13169 tg3_netif_start(tp);
13171 tg3_full_unlock(tp);
13179 static const struct net_device_ops tg3_netdev_ops = {
13180 .ndo_open = tg3_open,
13181 .ndo_stop = tg3_close,
13182 .ndo_start_xmit = tg3_start_xmit,
13183 .ndo_get_stats64 = tg3_get_stats64,
13184 .ndo_validate_addr = eth_validate_addr,
13185 .ndo_set_rx_mode = tg3_set_rx_mode,
13186 .ndo_set_mac_address = tg3_set_mac_addr,
13187 .ndo_do_ioctl = tg3_ioctl,
13188 .ndo_tx_timeout = tg3_tx_timeout,
13189 .ndo_change_mtu = tg3_change_mtu,
13190 .ndo_fix_features = tg3_fix_features,
13191 .ndo_set_features = tg3_set_features,
13192 #ifdef CONFIG_NET_POLL_CONTROLLER
13193 .ndo_poll_controller = tg3_poll_controller,
13197 static void tg3_get_eeprom_size(struct tg3 *tp)
13199 u32 cursize, val, magic;
13201 tp->nvram_size = EEPROM_CHIP_SIZE;
13203 if (tg3_nvram_read(tp, 0, &magic) != 0)
13206 if ((magic != TG3_EEPROM_MAGIC) &&
13207 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
13208 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
13212 * Size the chip by reading offsets at increasing powers of two.
13213 * When we encounter our validation signature, we know the addressing
13214 * has wrapped around, and thus have our chip size.
13218 while (cursize < tp->nvram_size) {
13219 if (tg3_nvram_read(tp, cursize, &val) != 0)
13228 tp->nvram_size = cursize;
13231 static void tg3_get_nvram_size(struct tg3 *tp)
13235 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
13238 /* Selfboot format */
13239 if (val != TG3_EEPROM_MAGIC) {
13240 tg3_get_eeprom_size(tp);
13244 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
13246 /* This is confusing. We want to operate on the
13247 * 16-bit value at offset 0xf2. The tg3_nvram_read()
13248 * call will read from NVRAM and byteswap the data
13249 * according to the byteswapping settings for all
13250 * other register accesses. This ensures the data we
13251 * want will always reside in the lower 16-bits.
13252 * However, the data in NVRAM is in LE format, which
13253 * means the data from the NVRAM read will always be
13254 * opposite the endianness of the CPU. The 16-bit
13255 * byteswap then brings the data to CPU endianness.
13257 tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
13261 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13264 static void tg3_get_nvram_info(struct tg3 *tp)
13268 nvcfg1 = tr32(NVRAM_CFG1);
13269 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
13270 tg3_flag_set(tp, FLASH);
13272 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13273 tw32(NVRAM_CFG1, nvcfg1);
13276 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
13277 tg3_flag(tp, 5780_CLASS)) {
13278 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
13279 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
13280 tp->nvram_jedecnum = JEDEC_ATMEL;
13281 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
13282 tg3_flag_set(tp, NVRAM_BUFFERED);
13284 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
13285 tp->nvram_jedecnum = JEDEC_ATMEL;
13286 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
13288 case FLASH_VENDOR_ATMEL_EEPROM:
13289 tp->nvram_jedecnum = JEDEC_ATMEL;
13290 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13291 tg3_flag_set(tp, NVRAM_BUFFERED);
13293 case FLASH_VENDOR_ST:
13294 tp->nvram_jedecnum = JEDEC_ST;
13295 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
13296 tg3_flag_set(tp, NVRAM_BUFFERED);
13298 case FLASH_VENDOR_SAIFUN:
13299 tp->nvram_jedecnum = JEDEC_SAIFUN;
13300 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
13302 case FLASH_VENDOR_SST_SMALL:
13303 case FLASH_VENDOR_SST_LARGE:
13304 tp->nvram_jedecnum = JEDEC_SST;
13305 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
13309 tp->nvram_jedecnum = JEDEC_ATMEL;
13310 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
13311 tg3_flag_set(tp, NVRAM_BUFFERED);
13315 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
13317 switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
13318 case FLASH_5752PAGE_SIZE_256:
13319 tp->nvram_pagesize = 256;
13321 case FLASH_5752PAGE_SIZE_512:
13322 tp->nvram_pagesize = 512;
13324 case FLASH_5752PAGE_SIZE_1K:
13325 tp->nvram_pagesize = 1024;
13327 case FLASH_5752PAGE_SIZE_2K:
13328 tp->nvram_pagesize = 2048;
13330 case FLASH_5752PAGE_SIZE_4K:
13331 tp->nvram_pagesize = 4096;
13333 case FLASH_5752PAGE_SIZE_264:
13334 tp->nvram_pagesize = 264;
13336 case FLASH_5752PAGE_SIZE_528:
13337 tp->nvram_pagesize = 528;
13342 static void tg3_get_5752_nvram_info(struct tg3 *tp)
13346 nvcfg1 = tr32(NVRAM_CFG1);
13348 /* NVRAM protection for TPM */
13349 if (nvcfg1 & (1 << 27))
13350 tg3_flag_set(tp, PROTECTED_NVRAM);
13352 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13353 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
13354 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
13355 tp->nvram_jedecnum = JEDEC_ATMEL;
13356 tg3_flag_set(tp, NVRAM_BUFFERED);
13358 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13359 tp->nvram_jedecnum = JEDEC_ATMEL;
13360 tg3_flag_set(tp, NVRAM_BUFFERED);
13361 tg3_flag_set(tp, FLASH);
13363 case FLASH_5752VENDOR_ST_M45PE10:
13364 case FLASH_5752VENDOR_ST_M45PE20:
13365 case FLASH_5752VENDOR_ST_M45PE40:
13366 tp->nvram_jedecnum = JEDEC_ST;
13367 tg3_flag_set(tp, NVRAM_BUFFERED);
13368 tg3_flag_set(tp, FLASH);
13372 if (tg3_flag(tp, FLASH)) {
13373 tg3_nvram_get_pagesize(tp, nvcfg1);
13375 /* For eeprom, set pagesize to maximum eeprom size */
13376 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13378 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13379 tw32(NVRAM_CFG1, nvcfg1);
13383 static void tg3_get_5755_nvram_info(struct tg3 *tp)
13385 u32 nvcfg1, protect = 0;
13387 nvcfg1 = tr32(NVRAM_CFG1);
13389 /* NVRAM protection for TPM */
13390 if (nvcfg1 & (1 << 27)) {
13391 tg3_flag_set(tp, PROTECTED_NVRAM);
13395 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
13397 case FLASH_5755VENDOR_ATMEL_FLASH_1:
13398 case FLASH_5755VENDOR_ATMEL_FLASH_2:
13399 case FLASH_5755VENDOR_ATMEL_FLASH_3:
13400 case FLASH_5755VENDOR_ATMEL_FLASH_5:
13401 tp->nvram_jedecnum = JEDEC_ATMEL;
13402 tg3_flag_set(tp, NVRAM_BUFFERED);
13403 tg3_flag_set(tp, FLASH);
13404 tp->nvram_pagesize = 264;
13405 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
13406 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
13407 tp->nvram_size = (protect ? 0x3e200 :
13408 TG3_NVRAM_SIZE_512KB);
13409 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
13410 tp->nvram_size = (protect ? 0x1f200 :
13411 TG3_NVRAM_SIZE_256KB);
13413 tp->nvram_size = (protect ? 0x1f200 :
13414 TG3_NVRAM_SIZE_128KB);
13416 case FLASH_5752VENDOR_ST_M45PE10:
13417 case FLASH_5752VENDOR_ST_M45PE20:
13418 case FLASH_5752VENDOR_ST_M45PE40:
13419 tp->nvram_jedecnum = JEDEC_ST;
13420 tg3_flag_set(tp, NVRAM_BUFFERED);
13421 tg3_flag_set(tp, FLASH);
13422 tp->nvram_pagesize = 256;
13423 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
13424 tp->nvram_size = (protect ?
13425 TG3_NVRAM_SIZE_64KB :
13426 TG3_NVRAM_SIZE_128KB);
13427 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
13428 tp->nvram_size = (protect ?
13429 TG3_NVRAM_SIZE_64KB :
13430 TG3_NVRAM_SIZE_256KB);
13432 tp->nvram_size = (protect ?
13433 TG3_NVRAM_SIZE_128KB :
13434 TG3_NVRAM_SIZE_512KB);
13439 static void tg3_get_5787_nvram_info(struct tg3 *tp)
13443 nvcfg1 = tr32(NVRAM_CFG1);
13445 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13446 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
13447 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
13448 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
13449 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
13450 tp->nvram_jedecnum = JEDEC_ATMEL;
13451 tg3_flag_set(tp, NVRAM_BUFFERED);
13452 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13454 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13455 tw32(NVRAM_CFG1, nvcfg1);
13457 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13458 case FLASH_5755VENDOR_ATMEL_FLASH_1:
13459 case FLASH_5755VENDOR_ATMEL_FLASH_2:
13460 case FLASH_5755VENDOR_ATMEL_FLASH_3:
13461 tp->nvram_jedecnum = JEDEC_ATMEL;
13462 tg3_flag_set(tp, NVRAM_BUFFERED);
13463 tg3_flag_set(tp, FLASH);
13464 tp->nvram_pagesize = 264;
13466 case FLASH_5752VENDOR_ST_M45PE10:
13467 case FLASH_5752VENDOR_ST_M45PE20:
13468 case FLASH_5752VENDOR_ST_M45PE40:
13469 tp->nvram_jedecnum = JEDEC_ST;
13470 tg3_flag_set(tp, NVRAM_BUFFERED);
13471 tg3_flag_set(tp, FLASH);
13472 tp->nvram_pagesize = 256;
13477 static void tg3_get_5761_nvram_info(struct tg3 *tp)
13479 u32 nvcfg1, protect = 0;
13481 nvcfg1 = tr32(NVRAM_CFG1);
13483 /* NVRAM protection for TPM */
13484 if (nvcfg1 & (1 << 27)) {
13485 tg3_flag_set(tp, PROTECTED_NVRAM);
13489 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
13491 case FLASH_5761VENDOR_ATMEL_ADB021D:
13492 case FLASH_5761VENDOR_ATMEL_ADB041D:
13493 case FLASH_5761VENDOR_ATMEL_ADB081D:
13494 case FLASH_5761VENDOR_ATMEL_ADB161D:
13495 case FLASH_5761VENDOR_ATMEL_MDB021D:
13496 case FLASH_5761VENDOR_ATMEL_MDB041D:
13497 case FLASH_5761VENDOR_ATMEL_MDB081D:
13498 case FLASH_5761VENDOR_ATMEL_MDB161D:
13499 tp->nvram_jedecnum = JEDEC_ATMEL;
13500 tg3_flag_set(tp, NVRAM_BUFFERED);
13501 tg3_flag_set(tp, FLASH);
13502 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13503 tp->nvram_pagesize = 256;
13505 case FLASH_5761VENDOR_ST_A_M45PE20:
13506 case FLASH_5761VENDOR_ST_A_M45PE40:
13507 case FLASH_5761VENDOR_ST_A_M45PE80:
13508 case FLASH_5761VENDOR_ST_A_M45PE16:
13509 case FLASH_5761VENDOR_ST_M_M45PE20:
13510 case FLASH_5761VENDOR_ST_M_M45PE40:
13511 case FLASH_5761VENDOR_ST_M_M45PE80:
13512 case FLASH_5761VENDOR_ST_M_M45PE16:
13513 tp->nvram_jedecnum = JEDEC_ST;
13514 tg3_flag_set(tp, NVRAM_BUFFERED);
13515 tg3_flag_set(tp, FLASH);
13516 tp->nvram_pagesize = 256;
13521 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
13524 case FLASH_5761VENDOR_ATMEL_ADB161D:
13525 case FLASH_5761VENDOR_ATMEL_MDB161D:
13526 case FLASH_5761VENDOR_ST_A_M45PE16:
13527 case FLASH_5761VENDOR_ST_M_M45PE16:
13528 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
13530 case FLASH_5761VENDOR_ATMEL_ADB081D:
13531 case FLASH_5761VENDOR_ATMEL_MDB081D:
13532 case FLASH_5761VENDOR_ST_A_M45PE80:
13533 case FLASH_5761VENDOR_ST_M_M45PE80:
13534 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
13536 case FLASH_5761VENDOR_ATMEL_ADB041D:
13537 case FLASH_5761VENDOR_ATMEL_MDB041D:
13538 case FLASH_5761VENDOR_ST_A_M45PE40:
13539 case FLASH_5761VENDOR_ST_M_M45PE40:
13540 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13542 case FLASH_5761VENDOR_ATMEL_ADB021D:
13543 case FLASH_5761VENDOR_ATMEL_MDB021D:
13544 case FLASH_5761VENDOR_ST_A_M45PE20:
13545 case FLASH_5761VENDOR_ST_M_M45PE20:
13546 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13552 static void tg3_get_5906_nvram_info(struct tg3 *tp)
13554 tp->nvram_jedecnum = JEDEC_ATMEL;
13555 tg3_flag_set(tp, NVRAM_BUFFERED);
13556 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13559 static void tg3_get_57780_nvram_info(struct tg3 *tp)
13563 nvcfg1 = tr32(NVRAM_CFG1);
13565 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13566 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
13567 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
13568 tp->nvram_jedecnum = JEDEC_ATMEL;
13569 tg3_flag_set(tp, NVRAM_BUFFERED);
13570 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13572 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13573 tw32(NVRAM_CFG1, nvcfg1);
13575 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13576 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
13577 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
13578 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
13579 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
13580 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
13581 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
13582 tp->nvram_jedecnum = JEDEC_ATMEL;
13583 tg3_flag_set(tp, NVRAM_BUFFERED);
13584 tg3_flag_set(tp, FLASH);
13586 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13587 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13588 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
13589 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
13590 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13592 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
13593 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
13594 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13596 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
13597 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
13598 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13602 case FLASH_5752VENDOR_ST_M45PE10:
13603 case FLASH_5752VENDOR_ST_M45PE20:
13604 case FLASH_5752VENDOR_ST_M45PE40:
13605 tp->nvram_jedecnum = JEDEC_ST;
13606 tg3_flag_set(tp, NVRAM_BUFFERED);
13607 tg3_flag_set(tp, FLASH);
13609 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13610 case FLASH_5752VENDOR_ST_M45PE10:
13611 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13613 case FLASH_5752VENDOR_ST_M45PE20:
13614 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13616 case FLASH_5752VENDOR_ST_M45PE40:
13617 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13622 tg3_flag_set(tp, NO_NVRAM);
13626 tg3_nvram_get_pagesize(tp, nvcfg1);
13627 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
13628 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13632 static void tg3_get_5717_nvram_info(struct tg3 *tp)
13636 nvcfg1 = tr32(NVRAM_CFG1);
13638 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13639 case FLASH_5717VENDOR_ATMEL_EEPROM:
13640 case FLASH_5717VENDOR_MICRO_EEPROM:
13641 tp->nvram_jedecnum = JEDEC_ATMEL;
13642 tg3_flag_set(tp, NVRAM_BUFFERED);
13643 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13645 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13646 tw32(NVRAM_CFG1, nvcfg1);
13648 case FLASH_5717VENDOR_ATMEL_MDB011D:
13649 case FLASH_5717VENDOR_ATMEL_ADB011B:
13650 case FLASH_5717VENDOR_ATMEL_ADB011D:
13651 case FLASH_5717VENDOR_ATMEL_MDB021D:
13652 case FLASH_5717VENDOR_ATMEL_ADB021B:
13653 case FLASH_5717VENDOR_ATMEL_ADB021D:
13654 case FLASH_5717VENDOR_ATMEL_45USPT:
13655 tp->nvram_jedecnum = JEDEC_ATMEL;
13656 tg3_flag_set(tp, NVRAM_BUFFERED);
13657 tg3_flag_set(tp, FLASH);
13659 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13660 case FLASH_5717VENDOR_ATMEL_MDB021D:
13661 /* Detect size with tg3_nvram_get_size() */
13663 case FLASH_5717VENDOR_ATMEL_ADB021B:
13664 case FLASH_5717VENDOR_ATMEL_ADB021D:
13665 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13668 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13672 case FLASH_5717VENDOR_ST_M_M25PE10:
13673 case FLASH_5717VENDOR_ST_A_M25PE10:
13674 case FLASH_5717VENDOR_ST_M_M45PE10:
13675 case FLASH_5717VENDOR_ST_A_M45PE10:
13676 case FLASH_5717VENDOR_ST_M_M25PE20:
13677 case FLASH_5717VENDOR_ST_A_M25PE20:
13678 case FLASH_5717VENDOR_ST_M_M45PE20:
13679 case FLASH_5717VENDOR_ST_A_M45PE20:
13680 case FLASH_5717VENDOR_ST_25USPT:
13681 case FLASH_5717VENDOR_ST_45USPT:
13682 tp->nvram_jedecnum = JEDEC_ST;
13683 tg3_flag_set(tp, NVRAM_BUFFERED);
13684 tg3_flag_set(tp, FLASH);
13686 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13687 case FLASH_5717VENDOR_ST_M_M25PE20:
13688 case FLASH_5717VENDOR_ST_M_M45PE20:
13689 /* Detect size with tg3_nvram_get_size() */
13691 case FLASH_5717VENDOR_ST_A_M25PE20:
13692 case FLASH_5717VENDOR_ST_A_M45PE20:
13693 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13696 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13701 tg3_flag_set(tp, NO_NVRAM);
13705 tg3_nvram_get_pagesize(tp, nvcfg1);
13706 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
13707 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13710 static void tg3_get_5720_nvram_info(struct tg3 *tp)
13712 u32 nvcfg1, nvmpinstrp;
13714 nvcfg1 = tr32(NVRAM_CFG1);
13715 nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
13717 switch (nvmpinstrp) {
13718 case FLASH_5720_EEPROM_HD:
13719 case FLASH_5720_EEPROM_LD:
13720 tp->nvram_jedecnum = JEDEC_ATMEL;
13721 tg3_flag_set(tp, NVRAM_BUFFERED);
13723 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13724 tw32(NVRAM_CFG1, nvcfg1);
13725 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
13726 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13728 tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
13730 case FLASH_5720VENDOR_M_ATMEL_DB011D:
13731 case FLASH_5720VENDOR_A_ATMEL_DB011B:
13732 case FLASH_5720VENDOR_A_ATMEL_DB011D:
13733 case FLASH_5720VENDOR_M_ATMEL_DB021D:
13734 case FLASH_5720VENDOR_A_ATMEL_DB021B:
13735 case FLASH_5720VENDOR_A_ATMEL_DB021D:
13736 case FLASH_5720VENDOR_M_ATMEL_DB041D:
13737 case FLASH_5720VENDOR_A_ATMEL_DB041B:
13738 case FLASH_5720VENDOR_A_ATMEL_DB041D:
13739 case FLASH_5720VENDOR_M_ATMEL_DB081D:
13740 case FLASH_5720VENDOR_A_ATMEL_DB081D:
13741 case FLASH_5720VENDOR_ATMEL_45USPT:
13742 tp->nvram_jedecnum = JEDEC_ATMEL;
13743 tg3_flag_set(tp, NVRAM_BUFFERED);
13744 tg3_flag_set(tp, FLASH);
13746 switch (nvmpinstrp) {
13747 case FLASH_5720VENDOR_M_ATMEL_DB021D:
13748 case FLASH_5720VENDOR_A_ATMEL_DB021B:
13749 case FLASH_5720VENDOR_A_ATMEL_DB021D:
13750 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13752 case FLASH_5720VENDOR_M_ATMEL_DB041D:
13753 case FLASH_5720VENDOR_A_ATMEL_DB041B:
13754 case FLASH_5720VENDOR_A_ATMEL_DB041D:
13755 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13757 case FLASH_5720VENDOR_M_ATMEL_DB081D:
13758 case FLASH_5720VENDOR_A_ATMEL_DB081D:
13759 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
13762 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13766 case FLASH_5720VENDOR_M_ST_M25PE10:
13767 case FLASH_5720VENDOR_M_ST_M45PE10:
13768 case FLASH_5720VENDOR_A_ST_M25PE10:
13769 case FLASH_5720VENDOR_A_ST_M45PE10:
13770 case FLASH_5720VENDOR_M_ST_M25PE20:
13771 case FLASH_5720VENDOR_M_ST_M45PE20:
13772 case FLASH_5720VENDOR_A_ST_M25PE20:
13773 case FLASH_5720VENDOR_A_ST_M45PE20:
13774 case FLASH_5720VENDOR_M_ST_M25PE40:
13775 case FLASH_5720VENDOR_M_ST_M45PE40:
13776 case FLASH_5720VENDOR_A_ST_M25PE40:
13777 case FLASH_5720VENDOR_A_ST_M45PE40:
13778 case FLASH_5720VENDOR_M_ST_M25PE80:
13779 case FLASH_5720VENDOR_M_ST_M45PE80:
13780 case FLASH_5720VENDOR_A_ST_M25PE80:
13781 case FLASH_5720VENDOR_A_ST_M45PE80:
13782 case FLASH_5720VENDOR_ST_25USPT:
13783 case FLASH_5720VENDOR_ST_45USPT:
13784 tp->nvram_jedecnum = JEDEC_ST;
13785 tg3_flag_set(tp, NVRAM_BUFFERED);
13786 tg3_flag_set(tp, FLASH);
13788 switch (nvmpinstrp) {
13789 case FLASH_5720VENDOR_M_ST_M25PE20:
13790 case FLASH_5720VENDOR_M_ST_M45PE20:
13791 case FLASH_5720VENDOR_A_ST_M25PE20:
13792 case FLASH_5720VENDOR_A_ST_M45PE20:
13793 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13795 case FLASH_5720VENDOR_M_ST_M25PE40:
13796 case FLASH_5720VENDOR_M_ST_M45PE40:
13797 case FLASH_5720VENDOR_A_ST_M25PE40:
13798 case FLASH_5720VENDOR_A_ST_M45PE40:
13799 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13801 case FLASH_5720VENDOR_M_ST_M25PE80:
13802 case FLASH_5720VENDOR_M_ST_M45PE80:
13803 case FLASH_5720VENDOR_A_ST_M25PE80:
13804 case FLASH_5720VENDOR_A_ST_M45PE80:
13805 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
13808 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13813 tg3_flag_set(tp, NO_NVRAM);
13817 tg3_nvram_get_pagesize(tp, nvcfg1);
13818 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
13819 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13822 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
13823 static void tg3_nvram_init(struct tg3 *tp)
13825 tw32_f(GRC_EEPROM_ADDR,
13826 (EEPROM_ADDR_FSM_RESET |
13827 (EEPROM_DEFAULT_CLOCK_PERIOD <<
13828 EEPROM_ADDR_CLKPERD_SHIFT)));
13832 /* Enable seeprom accesses. */
13833 tw32_f(GRC_LOCAL_CTRL,
13834 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
13837 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13838 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
13839 tg3_flag_set(tp, NVRAM);
13841 if (tg3_nvram_lock(tp)) {
13842 netdev_warn(tp->dev,
13843 "Cannot get nvram lock, %s failed\n",
13847 tg3_enable_nvram_access(tp);
13849 tp->nvram_size = 0;
13851 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
13852 tg3_get_5752_nvram_info(tp);
13853 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
13854 tg3_get_5755_nvram_info(tp);
13855 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13856 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13857 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
13858 tg3_get_5787_nvram_info(tp);
13859 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
13860 tg3_get_5761_nvram_info(tp);
13861 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13862 tg3_get_5906_nvram_info(tp);
13863 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13864 tg3_flag(tp, 57765_CLASS))
13865 tg3_get_57780_nvram_info(tp);
13866 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13867 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
13868 tg3_get_5717_nvram_info(tp);
13869 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13870 tg3_get_5720_nvram_info(tp);
13872 tg3_get_nvram_info(tp);
13874 if (tp->nvram_size == 0)
13875 tg3_get_nvram_size(tp);
13877 tg3_disable_nvram_access(tp);
13878 tg3_nvram_unlock(tp);
13881 tg3_flag_clear(tp, NVRAM);
13882 tg3_flag_clear(tp, NVRAM_BUFFERED);
13884 tg3_get_eeprom_size(tp);
13888 struct subsys_tbl_ent {
13889 u16 subsys_vendor, subsys_devid;
13893 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
13894 /* Broadcom boards. */
13895 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13896 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
13897 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13898 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
13899 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13900 TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
13901 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13902 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
13903 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13904 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
13905 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13906 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
13907 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13908 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
13909 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13910 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
13911 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13912 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
13913 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13914 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
13915 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13916 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
13919 { TG3PCI_SUBVENDOR_ID_3COM,
13920 TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
13921 { TG3PCI_SUBVENDOR_ID_3COM,
13922 TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
13923 { TG3PCI_SUBVENDOR_ID_3COM,
13924 TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
13925 { TG3PCI_SUBVENDOR_ID_3COM,
13926 TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
13927 { TG3PCI_SUBVENDOR_ID_3COM,
13928 TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
13931 { TG3PCI_SUBVENDOR_ID_DELL,
13932 TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
13933 { TG3PCI_SUBVENDOR_ID_DELL,
13934 TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
13935 { TG3PCI_SUBVENDOR_ID_DELL,
13936 TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
13937 { TG3PCI_SUBVENDOR_ID_DELL,
13938 TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
13940 /* Compaq boards. */
13941 { TG3PCI_SUBVENDOR_ID_COMPAQ,
13942 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
13943 { TG3PCI_SUBVENDOR_ID_COMPAQ,
13944 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
13945 { TG3PCI_SUBVENDOR_ID_COMPAQ,
13946 TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
13947 { TG3PCI_SUBVENDOR_ID_COMPAQ,
13948 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
13949 { TG3PCI_SUBVENDOR_ID_COMPAQ,
13950 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
13953 { TG3PCI_SUBVENDOR_ID_IBM,
13954 TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
13957 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
13961 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
13962 if ((subsys_id_to_phy_id[i].subsys_vendor ==
13963 tp->pdev->subsystem_vendor) &&
13964 (subsys_id_to_phy_id[i].subsys_devid ==
13965 tp->pdev->subsystem_device))
13966 return &subsys_id_to_phy_id[i];
13971 static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
13975 tp->phy_id = TG3_PHY_ID_INVALID;
13976 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13978 /* Assume an onboard device and WOL capable by default. */
13979 tg3_flag_set(tp, EEPROM_WRITE_PROT);
13980 tg3_flag_set(tp, WOL_CAP);
13982 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13983 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
13984 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13985 tg3_flag_set(tp, IS_NIC);
13987 val = tr32(VCPU_CFGSHDW);
13988 if (val & VCPU_CFGSHDW_ASPM_DBNC)
13989 tg3_flag_set(tp, ASPM_WORKAROUND);
13990 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
13991 (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
13992 tg3_flag_set(tp, WOL_ENABLE);
13993 device_set_wakeup_enable(&tp->pdev->dev, true);
13998 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
13999 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
14000 u32 nic_cfg, led_cfg;
14001 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
14002 int eeprom_phy_serdes = 0;
14004 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
14005 tp->nic_sram_data_cfg = nic_cfg;
14007 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
14008 ver >>= NIC_SRAM_DATA_VER_SHIFT;
14009 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14010 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14011 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
14012 (ver > 0) && (ver < 0x100))
14013 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
14015 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
14016 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
14018 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
14019 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
14020 eeprom_phy_serdes = 1;
14022 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
14023 if (nic_phy_id != 0) {
14024 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
14025 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
14027 eeprom_phy_id = (id1 >> 16) << 10;
14028 eeprom_phy_id |= (id2 & 0xfc00) << 16;
14029 eeprom_phy_id |= (id2 & 0x03ff) << 0;
14033 tp->phy_id = eeprom_phy_id;
14034 if (eeprom_phy_serdes) {
14035 if (!tg3_flag(tp, 5705_PLUS))
14036 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
14038 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
14041 if (tg3_flag(tp, 5750_PLUS))
14042 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
14043 SHASTA_EXT_LED_MODE_MASK);
14045 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
14049 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
14050 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14053 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
14054 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
14057 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
14058 tp->led_ctrl = LED_CTRL_MODE_MAC;
14060 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
14061 * read on some older 5700/5701 bootcode.
14063 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
14065 GET_ASIC_REV(tp->pci_chip_rev_id) ==
14067 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14071 case SHASTA_EXT_LED_SHARED:
14072 tp->led_ctrl = LED_CTRL_MODE_SHARED;
14073 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
14074 tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
14075 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
14076 LED_CTRL_MODE_PHY_2);
14079 case SHASTA_EXT_LED_MAC:
14080 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
14083 case SHASTA_EXT_LED_COMBO:
14084 tp->led_ctrl = LED_CTRL_MODE_COMBO;
14085 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
14086 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
14087 LED_CTRL_MODE_PHY_2);
14092 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14093 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
14094 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
14095 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
14097 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
14098 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14100 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
14101 tg3_flag_set(tp, EEPROM_WRITE_PROT);
14102 if ((tp->pdev->subsystem_vendor ==
14103 PCI_VENDOR_ID_ARIMA) &&
14104 (tp->pdev->subsystem_device == 0x205a ||
14105 tp->pdev->subsystem_device == 0x2063))
14106 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14108 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14109 tg3_flag_set(tp, IS_NIC);
14112 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
14113 tg3_flag_set(tp, ENABLE_ASF);
14114 if (tg3_flag(tp, 5750_PLUS))
14115 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
14118 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
14119 tg3_flag(tp, 5750_PLUS))
14120 tg3_flag_set(tp, ENABLE_APE);
14122 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
14123 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
14124 tg3_flag_clear(tp, WOL_CAP);
14126 if (tg3_flag(tp, WOL_CAP) &&
14127 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
14128 tg3_flag_set(tp, WOL_ENABLE);
14129 device_set_wakeup_enable(&tp->pdev->dev, true);
14132 if (cfg2 & (1 << 17))
14133 tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
14135 /* serdes signal pre-emphasis in register 0x590 set by */
14136 /* bootcode if bit 18 is set */
14137 if (cfg2 & (1 << 18))
14138 tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
14140 if ((tg3_flag(tp, 57765_PLUS) ||
14141 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
14142 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
14143 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
14144 tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
14146 if (tg3_flag(tp, PCI_EXPRESS) &&
14147 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
14148 !tg3_flag(tp, 57765_PLUS)) {
14151 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
14152 if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
14153 tg3_flag_set(tp, ASPM_WORKAROUND);
14156 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
14157 tg3_flag_set(tp, RGMII_INBAND_DISABLE);
14158 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
14159 tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
14160 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
14161 tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
14164 if (tg3_flag(tp, WOL_CAP))
14165 device_set_wakeup_enable(&tp->pdev->dev,
14166 tg3_flag(tp, WOL_ENABLE));
14168 device_set_wakeup_capable(&tp->pdev->dev, false);
14171 static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
14176 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
14177 tw32(OTP_CTRL, cmd);
14179 /* Wait for up to 1 ms for command to execute. */
14180 for (i = 0; i < 100; i++) {
14181 val = tr32(OTP_STATUS);
14182 if (val & OTP_STATUS_CMD_DONE)
14187 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
14190 /* Read the gphy configuration from the OTP region of the chip. The gphy
14191 * configuration is a 32-bit value that straddles the alignment boundary.
14192 * We do two 32-bit reads and then shift and merge the results.
14194 static u32 tg3_read_otp_phycfg(struct tg3 *tp)
14196 u32 bhalf_otp, thalf_otp;
14198 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
14200 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
14203 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
14205 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
14208 thalf_otp = tr32(OTP_READ_DATA);
14210 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
14212 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
14215 bhalf_otp = tr32(OTP_READ_DATA);
14217 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
14220 static void tg3_phy_init_link_config(struct tg3 *tp)
14222 u32 adv = ADVERTISED_Autoneg;
14224 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
14225 adv |= ADVERTISED_1000baseT_Half |
14226 ADVERTISED_1000baseT_Full;
14228 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
14229 adv |= ADVERTISED_100baseT_Half |
14230 ADVERTISED_100baseT_Full |
14231 ADVERTISED_10baseT_Half |
14232 ADVERTISED_10baseT_Full |
14235 adv |= ADVERTISED_FIBRE;
14237 tp->link_config.advertising = adv;
14238 tp->link_config.speed = SPEED_UNKNOWN;
14239 tp->link_config.duplex = DUPLEX_UNKNOWN;
14240 tp->link_config.autoneg = AUTONEG_ENABLE;
14241 tp->link_config.active_speed = SPEED_UNKNOWN;
14242 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
14247 static int tg3_phy_probe(struct tg3 *tp)
14249 u32 hw_phy_id_1, hw_phy_id_2;
14250 u32 hw_phy_id, hw_phy_id_masked;
14253 /* flow control autonegotiation is default behavior */
14254 tg3_flag_set(tp, PAUSE_AUTONEG);
14255 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
14257 if (tg3_flag(tp, ENABLE_APE)) {
14258 switch (tp->pci_fn) {
14260 tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
14263 tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
14266 tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
14269 tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
14274 if (tg3_flag(tp, USE_PHYLIB))
14275 return tg3_phy_init(tp);
14277 /* Reading the PHY ID register can conflict with ASF
14278 * firmware access to the PHY hardware.
14281 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
14282 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
14284 /* Now read the physical PHY_ID from the chip and verify
14285 * that it is sane. If it doesn't look good, we fall back
14286 * to either the hard-coded table based PHY_ID and failing
14287 * that the value found in the eeprom area.
14289 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
14290 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
14292 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
14293 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
14294 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
14296 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
14299 if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
14300 tp->phy_id = hw_phy_id;
14301 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
14302 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
14304 tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
14306 if (tp->phy_id != TG3_PHY_ID_INVALID) {
14307 /* Do nothing, phy ID already set up in
14308 * tg3_get_eeprom_hw_cfg().
14311 struct subsys_tbl_ent *p;
14313 /* No eeprom signature? Try the hardcoded
14314 * subsys device table.
14316 p = tg3_lookup_by_subsys(tp);
14320 tp->phy_id = p->phy_id;
14322 tp->phy_id == TG3_PHY_ID_BCM8002)
14323 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
14327 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
14328 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14329 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
14330 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762 ||
14331 (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
14332 tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
14333 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
14334 tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
14335 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
14337 tg3_phy_init_link_config(tp);
14339 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
14340 !tg3_flag(tp, ENABLE_APE) &&
14341 !tg3_flag(tp, ENABLE_ASF)) {
14344 tg3_readphy(tp, MII_BMSR, &bmsr);
14345 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
14346 (bmsr & BMSR_LSTATUS))
14347 goto skip_phy_reset;
14349 err = tg3_phy_reset(tp);
14353 tg3_phy_set_wirespeed(tp);
14355 if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
14356 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
14357 tp->link_config.flowctrl);
14359 tg3_writephy(tp, MII_BMCR,
14360 BMCR_ANENABLE | BMCR_ANRESTART);
14365 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
14366 err = tg3_init_5401phy_dsp(tp);
14370 err = tg3_init_5401phy_dsp(tp);
14376 static void tg3_read_vpd(struct tg3 *tp)
14379 unsigned int block_end, rosize, len;
14383 vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
14387 i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
14389 goto out_not_found;
14391 rosize = pci_vpd_lrdt_size(&vpd_data[i]);
14392 block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
14393 i += PCI_VPD_LRDT_TAG_SIZE;
14395 if (block_end > vpdlen)
14396 goto out_not_found;
14398 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
14399 PCI_VPD_RO_KEYWORD_MFR_ID);
14401 len = pci_vpd_info_field_size(&vpd_data[j]);
14403 j += PCI_VPD_INFO_FLD_HDR_SIZE;
14404 if (j + len > block_end || len != 4 ||
14405 memcmp(&vpd_data[j], "1028", 4))
14408 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
14409 PCI_VPD_RO_KEYWORD_VENDOR0);
14413 len = pci_vpd_info_field_size(&vpd_data[j]);
14415 j += PCI_VPD_INFO_FLD_HDR_SIZE;
14416 if (j + len > block_end)
14419 memcpy(tp->fw_ver, &vpd_data[j], len);
14420 strncat(tp->fw_ver, " bc ", vpdlen - len - 1);
14424 i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
14425 PCI_VPD_RO_KEYWORD_PARTNO);
14427 goto out_not_found;
14429 len = pci_vpd_info_field_size(&vpd_data[i]);
14431 i += PCI_VPD_INFO_FLD_HDR_SIZE;
14432 if (len > TG3_BPN_SIZE ||
14433 (len + i) > vpdlen)
14434 goto out_not_found;
14436 memcpy(tp->board_part_number, &vpd_data[i], len);
14440 if (tp->board_part_number[0])
14444 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
14445 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
14446 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
14447 strcpy(tp->board_part_number, "BCM5717");
14448 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
14449 strcpy(tp->board_part_number, "BCM5718");
14452 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
14453 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
14454 strcpy(tp->board_part_number, "BCM57780");
14455 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
14456 strcpy(tp->board_part_number, "BCM57760");
14457 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
14458 strcpy(tp->board_part_number, "BCM57790");
14459 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
14460 strcpy(tp->board_part_number, "BCM57788");
14463 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
14464 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
14465 strcpy(tp->board_part_number, "BCM57761");
14466 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
14467 strcpy(tp->board_part_number, "BCM57765");
14468 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
14469 strcpy(tp->board_part_number, "BCM57781");
14470 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
14471 strcpy(tp->board_part_number, "BCM57785");
14472 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
14473 strcpy(tp->board_part_number, "BCM57791");
14474 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
14475 strcpy(tp->board_part_number, "BCM57795");
14478 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766) {
14479 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
14480 strcpy(tp->board_part_number, "BCM57762");
14481 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
14482 strcpy(tp->board_part_number, "BCM57766");
14483 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
14484 strcpy(tp->board_part_number, "BCM57782");
14485 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
14486 strcpy(tp->board_part_number, "BCM57786");
14489 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14490 strcpy(tp->board_part_number, "BCM95906");
14493 strcpy(tp->board_part_number, "none");
14497 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
14501 if (tg3_nvram_read(tp, offset, &val) ||
14502 (val & 0xfc000000) != 0x0c000000 ||
14503 tg3_nvram_read(tp, offset + 4, &val) ||
14510 static void tg3_read_bc_ver(struct tg3 *tp)
14512 u32 val, offset, start, ver_offset;
14514 bool newver = false;
14516 if (tg3_nvram_read(tp, 0xc, &offset) ||
14517 tg3_nvram_read(tp, 0x4, &start))
14520 offset = tg3_nvram_logical_addr(tp, offset);
14522 if (tg3_nvram_read(tp, offset, &val))
14525 if ((val & 0xfc000000) == 0x0c000000) {
14526 if (tg3_nvram_read(tp, offset + 4, &val))
14533 dst_off = strlen(tp->fw_ver);
14536 if (TG3_VER_SIZE - dst_off < 16 ||
14537 tg3_nvram_read(tp, offset + 8, &ver_offset))
14540 offset = offset + ver_offset - start;
14541 for (i = 0; i < 16; i += 4) {
14543 if (tg3_nvram_read_be32(tp, offset + i, &v))
14546 memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
14551 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
14554 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
14555 TG3_NVM_BCVER_MAJSFT;
14556 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
14557 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
14558 "v%d.%02d", major, minor);
14562 static void tg3_read_hwsb_ver(struct tg3 *tp)
14564 u32 val, major, minor;
14566 /* Use native endian representation */
14567 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
14570 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
14571 TG3_NVM_HWSB_CFG1_MAJSFT;
14572 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
14573 TG3_NVM_HWSB_CFG1_MINSFT;
14575 snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
14578 static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
14580 u32 offset, major, minor, build;
14582 strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
14584 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
14587 switch (val & TG3_EEPROM_SB_REVISION_MASK) {
14588 case TG3_EEPROM_SB_REVISION_0:
14589 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
14591 case TG3_EEPROM_SB_REVISION_2:
14592 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
14594 case TG3_EEPROM_SB_REVISION_3:
14595 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
14597 case TG3_EEPROM_SB_REVISION_4:
14598 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
14600 case TG3_EEPROM_SB_REVISION_5:
14601 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
14603 case TG3_EEPROM_SB_REVISION_6:
14604 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
14610 if (tg3_nvram_read(tp, offset, &val))
14613 build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
14614 TG3_EEPROM_SB_EDH_BLD_SHFT;
14615 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
14616 TG3_EEPROM_SB_EDH_MAJ_SHFT;
14617 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
14619 if (minor > 99 || build > 26)
14622 offset = strlen(tp->fw_ver);
14623 snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
14624 " v%d.%02d", major, minor);
14627 offset = strlen(tp->fw_ver);
14628 if (offset < TG3_VER_SIZE - 1)
14629 tp->fw_ver[offset] = 'a' + build - 1;
14633 static void tg3_read_mgmtfw_ver(struct tg3 *tp)
14635 u32 val, offset, start;
14638 for (offset = TG3_NVM_DIR_START;
14639 offset < TG3_NVM_DIR_END;
14640 offset += TG3_NVM_DIRENT_SIZE) {
14641 if (tg3_nvram_read(tp, offset, &val))
14644 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
14648 if (offset == TG3_NVM_DIR_END)
14651 if (!tg3_flag(tp, 5705_PLUS))
14652 start = 0x08000000;
14653 else if (tg3_nvram_read(tp, offset - 4, &start))
14656 if (tg3_nvram_read(tp, offset + 4, &offset) ||
14657 !tg3_fw_img_is_valid(tp, offset) ||
14658 tg3_nvram_read(tp, offset + 8, &val))
14661 offset += val - start;
14663 vlen = strlen(tp->fw_ver);
14665 tp->fw_ver[vlen++] = ',';
14666 tp->fw_ver[vlen++] = ' ';
14668 for (i = 0; i < 4; i++) {
14670 if (tg3_nvram_read_be32(tp, offset, &v))
14673 offset += sizeof(v);
14675 if (vlen > TG3_VER_SIZE - sizeof(v)) {
14676 memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
14680 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
14685 static void tg3_probe_ncsi(struct tg3 *tp)
14689 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
14690 if (apedata != APE_SEG_SIG_MAGIC)
14693 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
14694 if (!(apedata & APE_FW_STATUS_READY))
14697 if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
14698 tg3_flag_set(tp, APE_HAS_NCSI);
14701 static void tg3_read_dash_ver(struct tg3 *tp)
14707 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
14709 if (tg3_flag(tp, APE_HAS_NCSI))
14714 vlen = strlen(tp->fw_ver);
14716 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
14718 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
14719 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
14720 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
14721 (apedata & APE_FW_VERSION_BLDMSK));
14724 static void tg3_read_fw_ver(struct tg3 *tp)
14727 bool vpd_vers = false;
14729 if (tp->fw_ver[0] != 0)
14732 if (tg3_flag(tp, NO_NVRAM)) {
14733 strcat(tp->fw_ver, "sb");
14737 if (tg3_nvram_read(tp, 0, &val))
14740 if (val == TG3_EEPROM_MAGIC)
14741 tg3_read_bc_ver(tp);
14742 else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
14743 tg3_read_sb_ver(tp, val);
14744 else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
14745 tg3_read_hwsb_ver(tp);
14747 if (tg3_flag(tp, ENABLE_ASF)) {
14748 if (tg3_flag(tp, ENABLE_APE)) {
14749 tg3_probe_ncsi(tp);
14751 tg3_read_dash_ver(tp);
14752 } else if (!vpd_vers) {
14753 tg3_read_mgmtfw_ver(tp);
14757 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
14760 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
14762 if (tg3_flag(tp, LRG_PROD_RING_CAP))
14763 return TG3_RX_RET_MAX_SIZE_5717;
14764 else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
14765 return TG3_RX_RET_MAX_SIZE_5700;
14767 return TG3_RX_RET_MAX_SIZE_5705;
14770 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
14771 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
14772 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
14773 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
14777 static struct pci_dev *tg3_find_peer(struct tg3 *tp)
14779 struct pci_dev *peer;
14780 unsigned int func, devnr = tp->pdev->devfn & ~7;
14782 for (func = 0; func < 8; func++) {
14783 peer = pci_get_slot(tp->pdev->bus, devnr | func);
14784 if (peer && peer != tp->pdev)
14788 /* 5704 can be configured in single-port mode, set peer to
14789 * tp->pdev in that case.
14797 * We don't need to keep the refcount elevated; there's no way
14798 * to remove one half of this device without removing the other
14805 static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
14807 tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
14808 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
14811 /* All devices that use the alternate
14812 * ASIC REV location have a CPMU.
14814 tg3_flag_set(tp, CPMU_PRESENT);
14816 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
14817 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
14818 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
14819 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
14820 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
14821 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
14822 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
14823 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727)
14824 reg = TG3PCI_GEN2_PRODID_ASICREV;
14825 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
14826 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
14827 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
14828 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
14829 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
14830 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
14831 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
14832 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
14833 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
14834 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
14835 reg = TG3PCI_GEN15_PRODID_ASICREV;
14837 reg = TG3PCI_PRODID_ASICREV;
14839 pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
14842 /* Wrong chip ID in 5752 A0. This code can be removed later
14843 * as A0 is not in production.
14845 if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
14846 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
14848 if (tp->pci_chip_rev_id == CHIPREV_ID_5717_C0)
14849 tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
14851 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14852 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14853 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14854 tg3_flag_set(tp, 5717_PLUS);
14856 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
14857 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
14858 tg3_flag_set(tp, 57765_CLASS);
14860 if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
14861 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
14862 tg3_flag_set(tp, 57765_PLUS);
14864 /* Intentionally exclude ASIC_REV_5906 */
14865 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14866 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
14867 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14868 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14869 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14870 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14871 tg3_flag(tp, 57765_PLUS))
14872 tg3_flag_set(tp, 5755_PLUS);
14874 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
14875 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
14876 tg3_flag_set(tp, 5780_CLASS);
14878 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14879 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14880 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
14881 tg3_flag(tp, 5755_PLUS) ||
14882 tg3_flag(tp, 5780_CLASS))
14883 tg3_flag_set(tp, 5750_PLUS);
14885 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
14886 tg3_flag(tp, 5750_PLUS))
14887 tg3_flag_set(tp, 5705_PLUS);
14890 static bool tg3_10_100_only_device(struct tg3 *tp,
14891 const struct pci_device_id *ent)
14893 u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
14895 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14896 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
14897 (tp->phy_flags & TG3_PHYFLG_IS_FET))
14900 if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
14901 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
14902 if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
14912 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
14915 u32 pci_state_reg, grc_misc_cfg;
14920 /* Force memory write invalidate off. If we leave it on,
14921 * then on 5700_BX chips we have to enable a workaround.
14922 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
14923 * to match the cacheline size. The Broadcom driver have this
14924 * workaround but turns MWI off all the times so never uses
14925 * it. This seems to suggest that the workaround is insufficient.
14927 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14928 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
14929 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14931 /* Important! -- Make sure register accesses are byteswapped
14932 * correctly. Also, for those chips that require it, make
14933 * sure that indirect register accesses are enabled before
14934 * the first operation.
14936 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14938 tp->misc_host_ctrl |= (misc_ctrl_reg &
14939 MISC_HOST_CTRL_CHIPREV);
14940 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14941 tp->misc_host_ctrl);
14943 tg3_detect_asic_rev(tp, misc_ctrl_reg);
14945 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
14946 * we need to disable memory and use config. cycles
14947 * only to access all registers. The 5702/03 chips
14948 * can mistakenly decode the special cycles from the
14949 * ICH chipsets as memory write cycles, causing corruption
14950 * of register and memory space. Only certain ICH bridges
14951 * will drive special cycles with non-zero data during the
14952 * address phase which can fall within the 5703's address
14953 * range. This is not an ICH bug as the PCI spec allows
14954 * non-zero address during special cycles. However, only
14955 * these ICH bridges are known to drive non-zero addresses
14956 * during special cycles.
14958 * Since special cycles do not cross PCI bridges, we only
14959 * enable this workaround if the 5703 is on the secondary
14960 * bus of these ICH bridges.
14962 if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
14963 (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
14964 static struct tg3_dev_id {
14968 } ich_chipsets[] = {
14969 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
14971 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
14973 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
14975 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
14979 struct tg3_dev_id *pci_id = &ich_chipsets[0];
14980 struct pci_dev *bridge = NULL;
14982 while (pci_id->vendor != 0) {
14983 bridge = pci_get_device(pci_id->vendor, pci_id->device,
14989 if (pci_id->rev != PCI_ANY_ID) {
14990 if (bridge->revision > pci_id->rev)
14993 if (bridge->subordinate &&
14994 (bridge->subordinate->number ==
14995 tp->pdev->bus->number)) {
14996 tg3_flag_set(tp, ICH_WORKAROUND);
14997 pci_dev_put(bridge);
15003 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
15004 static struct tg3_dev_id {
15007 } bridge_chipsets[] = {
15008 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
15009 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
15012 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
15013 struct pci_dev *bridge = NULL;
15015 while (pci_id->vendor != 0) {
15016 bridge = pci_get_device(pci_id->vendor,
15023 if (bridge->subordinate &&
15024 (bridge->subordinate->number <=
15025 tp->pdev->bus->number) &&
15026 (bridge->subordinate->busn_res.end >=
15027 tp->pdev->bus->number)) {
15028 tg3_flag_set(tp, 5701_DMA_BUG);
15029 pci_dev_put(bridge);
15035 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
15036 * DMA addresses > 40-bit. This bridge may have other additional
15037 * 57xx devices behind it in some 4-port NIC designs for example.
15038 * Any tg3 device found behind the bridge will also need the 40-bit
15041 if (tg3_flag(tp, 5780_CLASS)) {
15042 tg3_flag_set(tp, 40BIT_DMA_BUG);
15043 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
15045 struct pci_dev *bridge = NULL;
15048 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
15049 PCI_DEVICE_ID_SERVERWORKS_EPB,
15051 if (bridge && bridge->subordinate &&
15052 (bridge->subordinate->number <=
15053 tp->pdev->bus->number) &&
15054 (bridge->subordinate->busn_res.end >=
15055 tp->pdev->bus->number)) {
15056 tg3_flag_set(tp, 40BIT_DMA_BUG);
15057 pci_dev_put(bridge);
15063 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
15064 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
15065 tp->pdev_peer = tg3_find_peer(tp);
15067 /* Determine TSO capabilities */
15068 if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0)
15069 ; /* Do nothing. HW bug. */
15070 else if (tg3_flag(tp, 57765_PLUS))
15071 tg3_flag_set(tp, HW_TSO_3);
15072 else if (tg3_flag(tp, 5755_PLUS) ||
15073 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
15074 tg3_flag_set(tp, HW_TSO_2);
15075 else if (tg3_flag(tp, 5750_PLUS)) {
15076 tg3_flag_set(tp, HW_TSO_1);
15077 tg3_flag_set(tp, TSO_BUG);
15078 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
15079 tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
15080 tg3_flag_clear(tp, TSO_BUG);
15081 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
15082 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
15083 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
15084 tg3_flag_set(tp, TSO_BUG);
15085 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
15086 tp->fw_needed = FIRMWARE_TG3TSO5;
15088 tp->fw_needed = FIRMWARE_TG3TSO;
15091 /* Selectively allow TSO based on operating conditions */
15092 if (tg3_flag(tp, HW_TSO_1) ||
15093 tg3_flag(tp, HW_TSO_2) ||
15094 tg3_flag(tp, HW_TSO_3) ||
15096 /* For firmware TSO, assume ASF is disabled.
15097 * We'll disable TSO later if we discover ASF
15098 * is enabled in tg3_get_eeprom_hw_cfg().
15100 tg3_flag_set(tp, TSO_CAPABLE);
15102 tg3_flag_clear(tp, TSO_CAPABLE);
15103 tg3_flag_clear(tp, TSO_BUG);
15104 tp->fw_needed = NULL;
15107 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
15108 tp->fw_needed = FIRMWARE_TG3;
15112 if (tg3_flag(tp, 5750_PLUS)) {
15113 tg3_flag_set(tp, SUPPORT_MSI);
15114 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
15115 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
15116 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
15117 tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
15118 tp->pdev_peer == tp->pdev))
15119 tg3_flag_clear(tp, SUPPORT_MSI);
15121 if (tg3_flag(tp, 5755_PLUS) ||
15122 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
15123 tg3_flag_set(tp, 1SHOT_MSI);
15126 if (tg3_flag(tp, 57765_PLUS)) {
15127 tg3_flag_set(tp, SUPPORT_MSIX);
15128 tp->irq_max = TG3_IRQ_MAX_VECS;
15134 if (tp->irq_max > 1) {
15135 tp->rxq_max = TG3_RSS_MAX_NUM_QS;
15136 tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
15138 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
15139 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
15140 tp->txq_max = tp->irq_max - 1;
15143 if (tg3_flag(tp, 5755_PLUS) ||
15144 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
15145 tg3_flag_set(tp, SHORT_DMA_BUG);
15147 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
15148 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
15150 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
15151 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
15152 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
15153 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
15154 tg3_flag_set(tp, LRG_PROD_RING_CAP);
15156 if (tg3_flag(tp, 57765_PLUS) &&
15157 tp->pci_chip_rev_id != CHIPREV_ID_5719_A0)
15158 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
15160 if (!tg3_flag(tp, 5705_PLUS) ||
15161 tg3_flag(tp, 5780_CLASS) ||
15162 tg3_flag(tp, USE_JUMBO_BDFLAG))
15163 tg3_flag_set(tp, JUMBO_CAPABLE);
15165 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
15168 if (pci_is_pcie(tp->pdev)) {
15171 tg3_flag_set(tp, PCI_EXPRESS);
15173 pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
15174 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
15175 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
15177 tg3_flag_clear(tp, HW_TSO_2);
15178 tg3_flag_clear(tp, TSO_CAPABLE);
15180 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
15181 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
15182 tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
15183 tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
15184 tg3_flag_set(tp, CLKREQ_BUG);
15185 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
15186 tg3_flag_set(tp, L1PLLPD_EN);
15188 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
15189 /* BCM5785 devices are effectively PCIe devices, and should
15190 * follow PCIe codepaths, but do not have a PCIe capabilities
15193 tg3_flag_set(tp, PCI_EXPRESS);
15194 } else if (!tg3_flag(tp, 5705_PLUS) ||
15195 tg3_flag(tp, 5780_CLASS)) {
15196 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
15197 if (!tp->pcix_cap) {
15198 dev_err(&tp->pdev->dev,
15199 "Cannot find PCI-X capability, aborting\n");
15203 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
15204 tg3_flag_set(tp, PCIX_MODE);
15207 /* If we have an AMD 762 or VIA K8T800 chipset, write
15208 * reordering to the mailbox registers done by the host
15209 * controller can cause major troubles. We read back from
15210 * every mailbox register write to force the writes to be
15211 * posted to the chip in order.
15213 if (pci_dev_present(tg3_write_reorder_chipsets) &&
15214 !tg3_flag(tp, PCI_EXPRESS))
15215 tg3_flag_set(tp, MBOX_WRITE_REORDER);
15217 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
15218 &tp->pci_cacheline_sz);
15219 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
15220 &tp->pci_lat_timer);
15221 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
15222 tp->pci_lat_timer < 64) {
15223 tp->pci_lat_timer = 64;
15224 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
15225 tp->pci_lat_timer);
15228 /* Important! -- It is critical that the PCI-X hw workaround
15229 * situation is decided before the first MMIO register access.
15231 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
15232 /* 5700 BX chips need to have their TX producer index
15233 * mailboxes written twice to workaround a bug.
15235 tg3_flag_set(tp, TXD_MBOX_HWBUG);
15237 /* If we are in PCI-X mode, enable register write workaround.
15239 * The workaround is to use indirect register accesses
15240 * for all chip writes not to mailbox registers.
15242 if (tg3_flag(tp, PCIX_MODE)) {
15245 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
15247 /* The chip can have it's power management PCI config
15248 * space registers clobbered due to this bug.
15249 * So explicitly force the chip into D0 here.
15251 pci_read_config_dword(tp->pdev,
15252 tp->pm_cap + PCI_PM_CTRL,
15254 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
15255 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
15256 pci_write_config_dword(tp->pdev,
15257 tp->pm_cap + PCI_PM_CTRL,
15260 /* Also, force SERR#/PERR# in PCI command. */
15261 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15262 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
15263 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15267 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
15268 tg3_flag_set(tp, PCI_HIGH_SPEED);
15269 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
15270 tg3_flag_set(tp, PCI_32BIT);
15272 /* Chip-specific fixup from Broadcom driver */
15273 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
15274 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
15275 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
15276 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
15279 /* Default fast path register access methods */
15280 tp->read32 = tg3_read32;
15281 tp->write32 = tg3_write32;
15282 tp->read32_mbox = tg3_read32;
15283 tp->write32_mbox = tg3_write32;
15284 tp->write32_tx_mbox = tg3_write32;
15285 tp->write32_rx_mbox = tg3_write32;
15287 /* Various workaround register access methods */
15288 if (tg3_flag(tp, PCIX_TARGET_HWBUG))
15289 tp->write32 = tg3_write_indirect_reg32;
15290 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
15291 (tg3_flag(tp, PCI_EXPRESS) &&
15292 tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
15294 * Back to back register writes can cause problems on these
15295 * chips, the workaround is to read back all reg writes
15296 * except those to mailbox regs.
15298 * See tg3_write_indirect_reg32().
15300 tp->write32 = tg3_write_flush_reg32;
15303 if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
15304 tp->write32_tx_mbox = tg3_write32_tx_mbox;
15305 if (tg3_flag(tp, MBOX_WRITE_REORDER))
15306 tp->write32_rx_mbox = tg3_write_flush_reg32;
15309 if (tg3_flag(tp, ICH_WORKAROUND)) {
15310 tp->read32 = tg3_read_indirect_reg32;
15311 tp->write32 = tg3_write_indirect_reg32;
15312 tp->read32_mbox = tg3_read_indirect_mbox;
15313 tp->write32_mbox = tg3_write_indirect_mbox;
15314 tp->write32_tx_mbox = tg3_write_indirect_mbox;
15315 tp->write32_rx_mbox = tg3_write_indirect_mbox;
15320 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15321 pci_cmd &= ~PCI_COMMAND_MEMORY;
15322 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15324 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
15325 tp->read32_mbox = tg3_read32_mbox_5906;
15326 tp->write32_mbox = tg3_write32_mbox_5906;
15327 tp->write32_tx_mbox = tg3_write32_mbox_5906;
15328 tp->write32_rx_mbox = tg3_write32_mbox_5906;
15331 if (tp->write32 == tg3_write_indirect_reg32 ||
15332 (tg3_flag(tp, PCIX_MODE) &&
15333 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
15334 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
15335 tg3_flag_set(tp, SRAM_USE_CONFIG);
15337 /* The memory arbiter has to be enabled in order for SRAM accesses
15338 * to succeed. Normally on powerup the tg3 chip firmware will make
15339 * sure it is enabled, but other entities such as system netboot
15340 * code might disable it.
15342 val = tr32(MEMARB_MODE);
15343 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
15345 tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
15346 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
15347 tg3_flag(tp, 5780_CLASS)) {
15348 if (tg3_flag(tp, PCIX_MODE)) {
15349 pci_read_config_dword(tp->pdev,
15350 tp->pcix_cap + PCI_X_STATUS,
15352 tp->pci_fn = val & 0x7;
15354 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
15355 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
15356 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
15357 NIC_SRAM_CPMUSTAT_SIG) {
15358 tp->pci_fn = val & TG3_CPMU_STATUS_FMSK_5717;
15359 tp->pci_fn = tp->pci_fn ? 1 : 0;
15361 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
15362 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
15363 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
15364 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
15365 NIC_SRAM_CPMUSTAT_SIG) {
15366 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
15367 TG3_CPMU_STATUS_FSHFT_5719;
15371 /* Get eeprom hw config before calling tg3_set_power_state().
15372 * In particular, the TG3_FLAG_IS_NIC flag must be
15373 * determined before calling tg3_set_power_state() so that
15374 * we know whether or not to switch out of Vaux power.
15375 * When the flag is set, it means that GPIO1 is used for eeprom
15376 * write protect and also implies that it is a LOM where GPIOs
15377 * are not used to switch power.
15379 tg3_get_eeprom_hw_cfg(tp);
15381 if (tp->fw_needed && tg3_flag(tp, ENABLE_ASF)) {
15382 tg3_flag_clear(tp, TSO_CAPABLE);
15383 tg3_flag_clear(tp, TSO_BUG);
15384 tp->fw_needed = NULL;
15387 if (tg3_flag(tp, ENABLE_APE)) {
15388 /* Allow reads and writes to the
15389 * APE register and memory space.
15391 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
15392 PCISTATE_ALLOW_APE_SHMEM_WR |
15393 PCISTATE_ALLOW_APE_PSPACE_WR;
15394 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
15397 tg3_ape_lock_init(tp);
15400 /* Set up tp->grc_local_ctrl before calling
15401 * tg3_pwrsrc_switch_to_vmain(). GPIO1 driven high
15402 * will bring 5700's external PHY out of reset.
15403 * It is also used as eeprom write protect on LOMs.
15405 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
15406 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
15407 tg3_flag(tp, EEPROM_WRITE_PROT))
15408 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
15409 GRC_LCLCTRL_GPIO_OUTPUT1);
15410 /* Unused GPIO3 must be driven as output on 5752 because there
15411 * are no pull-up resistors on unused GPIO pins.
15413 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
15414 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
15416 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
15417 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
15418 tg3_flag(tp, 57765_CLASS))
15419 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
15421 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
15422 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
15423 /* Turn off the debug UART. */
15424 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
15425 if (tg3_flag(tp, IS_NIC))
15426 /* Keep VMain power. */
15427 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
15428 GRC_LCLCTRL_GPIO_OUTPUT0;
15431 /* Switch out of Vaux if it is a NIC */
15432 tg3_pwrsrc_switch_to_vmain(tp);
15434 /* Derive initial jumbo mode from MTU assigned in
15435 * ether_setup() via the alloc_etherdev() call
15437 if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
15438 tg3_flag_set(tp, JUMBO_RING_ENABLE);
15440 /* Determine WakeOnLan speed to use. */
15441 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
15442 tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
15443 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
15444 tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
15445 tg3_flag_clear(tp, WOL_SPEED_100MB);
15447 tg3_flag_set(tp, WOL_SPEED_100MB);
15450 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
15451 tp->phy_flags |= TG3_PHYFLG_IS_FET;
15453 /* A few boards don't want Ethernet@WireSpeed phy feature */
15454 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
15455 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
15456 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
15457 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
15458 (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
15459 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
15460 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
15462 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
15463 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
15464 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
15465 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
15466 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
15468 if (tg3_flag(tp, 5705_PLUS) &&
15469 !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
15470 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
15471 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
15472 !tg3_flag(tp, 57765_PLUS)) {
15473 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
15474 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
15475 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
15476 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
15477 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
15478 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
15479 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
15480 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
15481 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
15483 tp->phy_flags |= TG3_PHYFLG_BER_BUG;
15486 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
15487 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
15488 tp->phy_otp = tg3_read_otp_phycfg(tp);
15489 if (tp->phy_otp == 0)
15490 tp->phy_otp = TG3_OTP_DEFAULT;
15493 if (tg3_flag(tp, CPMU_PRESENT))
15494 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
15496 tp->mi_mode = MAC_MI_MODE_BASE;
15498 tp->coalesce_mode = 0;
15499 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
15500 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
15501 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
15503 /* Set these bits to enable statistics workaround. */
15504 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
15505 tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
15506 tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
15507 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
15508 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
15511 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
15512 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
15513 tg3_flag_set(tp, USE_PHYLIB);
15515 err = tg3_mdio_init(tp);
15519 /* Initialize data/descriptor byte/word swapping. */
15520 val = tr32(GRC_MODE);
15521 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
15522 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
15523 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
15524 GRC_MODE_WORD_SWAP_B2HRX_DATA |
15525 GRC_MODE_B2HRX_ENABLE |
15526 GRC_MODE_HTX2B_ENABLE |
15527 GRC_MODE_HOST_STACKUP);
15529 val &= GRC_MODE_HOST_STACKUP;
15531 tw32(GRC_MODE, val | tp->grc_mode);
15533 tg3_switch_clocks(tp);
15535 /* Clear this out for sanity. */
15536 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
15538 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
15540 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
15541 !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
15542 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
15544 if (chiprevid == CHIPREV_ID_5701_A0 ||
15545 chiprevid == CHIPREV_ID_5701_B0 ||
15546 chiprevid == CHIPREV_ID_5701_B2 ||
15547 chiprevid == CHIPREV_ID_5701_B5) {
15548 void __iomem *sram_base;
15550 /* Write some dummy words into the SRAM status block
15551 * area, see if it reads back correctly. If the return
15552 * value is bad, force enable the PCIX workaround.
15554 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
15556 writel(0x00000000, sram_base);
15557 writel(0x00000000, sram_base + 4);
15558 writel(0xffffffff, sram_base + 4);
15559 if (readl(sram_base) != 0x00000000)
15560 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
15565 tg3_nvram_init(tp);
15567 grc_misc_cfg = tr32(GRC_MISC_CFG);
15568 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
15570 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
15571 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
15572 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
15573 tg3_flag_set(tp, IS_5788);
15575 if (!tg3_flag(tp, IS_5788) &&
15576 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
15577 tg3_flag_set(tp, TAGGED_STATUS);
15578 if (tg3_flag(tp, TAGGED_STATUS)) {
15579 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
15580 HOSTCC_MODE_CLRTICK_TXBD);
15582 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
15583 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15584 tp->misc_host_ctrl);
15587 /* Preserve the APE MAC_MODE bits */
15588 if (tg3_flag(tp, ENABLE_APE))
15589 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
15593 if (tg3_10_100_only_device(tp, ent))
15594 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
15596 err = tg3_phy_probe(tp);
15598 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
15599 /* ... but do not return immediately ... */
15604 tg3_read_fw_ver(tp);
15606 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
15607 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
15609 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
15610 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
15612 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
15615 /* 5700 {AX,BX} chips have a broken status block link
15616 * change bit implementation, so we must use the
15617 * status register in those cases.
15619 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
15620 tg3_flag_set(tp, USE_LINKCHG_REG);
15622 tg3_flag_clear(tp, USE_LINKCHG_REG);
15624 /* The led_ctrl is set during tg3_phy_probe, here we might
15625 * have to force the link status polling mechanism based
15626 * upon subsystem IDs.
15628 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
15629 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
15630 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
15631 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
15632 tg3_flag_set(tp, USE_LINKCHG_REG);
15635 /* For all SERDES we poll the MAC status register. */
15636 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
15637 tg3_flag_set(tp, POLL_SERDES);
15639 tg3_flag_clear(tp, POLL_SERDES);
15641 tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
15642 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
15643 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
15644 tg3_flag(tp, PCIX_MODE)) {
15645 tp->rx_offset = NET_SKB_PAD;
15646 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
15647 tp->rx_copy_thresh = ~(u16)0;
15651 tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
15652 tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
15653 tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
15655 tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
15657 /* Increment the rx prod index on the rx std ring by at most
15658 * 8 for these chips to workaround hw errata.
15660 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
15661 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
15662 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
15663 tp->rx_std_max_post = 8;
15665 if (tg3_flag(tp, ASPM_WORKAROUND))
15666 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
15667 PCIE_PWR_MGMT_L1_THRESH_MSK;
15672 #ifdef CONFIG_SPARC
15673 static int tg3_get_macaddr_sparc(struct tg3 *tp)
15675 struct net_device *dev = tp->dev;
15676 struct pci_dev *pdev = tp->pdev;
15677 struct device_node *dp = pci_device_to_OF_node(pdev);
15678 const unsigned char *addr;
15681 addr = of_get_property(dp, "local-mac-address", &len);
15682 if (addr && len == 6) {
15683 memcpy(dev->dev_addr, addr, 6);
15684 memcpy(dev->perm_addr, dev->dev_addr, 6);
15690 static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
15692 struct net_device *dev = tp->dev;
15694 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
15695 memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
15700 static int tg3_get_device_address(struct tg3 *tp)
15702 struct net_device *dev = tp->dev;
15703 u32 hi, lo, mac_offset;
15706 #ifdef CONFIG_SPARC
15707 if (!tg3_get_macaddr_sparc(tp))
15712 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
15713 tg3_flag(tp, 5780_CLASS)) {
15714 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
15716 if (tg3_nvram_lock(tp))
15717 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
15719 tg3_nvram_unlock(tp);
15720 } else if (tg3_flag(tp, 5717_PLUS)) {
15721 if (tp->pci_fn & 1)
15723 if (tp->pci_fn > 1)
15724 mac_offset += 0x18c;
15725 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
15728 /* First try to get it from MAC address mailbox. */
15729 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
15730 if ((hi >> 16) == 0x484b) {
15731 dev->dev_addr[0] = (hi >> 8) & 0xff;
15732 dev->dev_addr[1] = (hi >> 0) & 0xff;
15734 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
15735 dev->dev_addr[2] = (lo >> 24) & 0xff;
15736 dev->dev_addr[3] = (lo >> 16) & 0xff;
15737 dev->dev_addr[4] = (lo >> 8) & 0xff;
15738 dev->dev_addr[5] = (lo >> 0) & 0xff;
15740 /* Some old bootcode may report a 0 MAC address in SRAM */
15741 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
15744 /* Next, try NVRAM. */
15745 if (!tg3_flag(tp, NO_NVRAM) &&
15746 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
15747 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
15748 memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
15749 memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
15751 /* Finally just fetch it out of the MAC control regs. */
15753 hi = tr32(MAC_ADDR_0_HIGH);
15754 lo = tr32(MAC_ADDR_0_LOW);
15756 dev->dev_addr[5] = lo & 0xff;
15757 dev->dev_addr[4] = (lo >> 8) & 0xff;
15758 dev->dev_addr[3] = (lo >> 16) & 0xff;
15759 dev->dev_addr[2] = (lo >> 24) & 0xff;
15760 dev->dev_addr[1] = hi & 0xff;
15761 dev->dev_addr[0] = (hi >> 8) & 0xff;
15765 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
15766 #ifdef CONFIG_SPARC
15767 if (!tg3_get_default_macaddr_sparc(tp))
15772 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
15776 #define BOUNDARY_SINGLE_CACHELINE 1
15777 #define BOUNDARY_MULTI_CACHELINE 2
15779 static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
15781 int cacheline_size;
15785 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
15787 cacheline_size = 1024;
15789 cacheline_size = (int) byte * 4;
15791 /* On 5703 and later chips, the boundary bits have no
15794 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
15795 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
15796 !tg3_flag(tp, PCI_EXPRESS))
15799 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
15800 goal = BOUNDARY_MULTI_CACHELINE;
15802 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
15803 goal = BOUNDARY_SINGLE_CACHELINE;
15809 if (tg3_flag(tp, 57765_PLUS)) {
15810 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
15817 /* PCI controllers on most RISC systems tend to disconnect
15818 * when a device tries to burst across a cache-line boundary.
15819 * Therefore, letting tg3 do so just wastes PCI bandwidth.
15821 * Unfortunately, for PCI-E there are only limited
15822 * write-side controls for this, and thus for reads
15823 * we will still get the disconnects. We'll also waste
15824 * these PCI cycles for both read and write for chips
15825 * other than 5700 and 5701 which do not implement the
15828 if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
15829 switch (cacheline_size) {
15834 if (goal == BOUNDARY_SINGLE_CACHELINE) {
15835 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
15836 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
15838 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
15839 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
15844 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
15845 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
15849 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
15850 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
15853 } else if (tg3_flag(tp, PCI_EXPRESS)) {
15854 switch (cacheline_size) {
15858 if (goal == BOUNDARY_SINGLE_CACHELINE) {
15859 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
15860 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
15866 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
15867 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
15871 switch (cacheline_size) {
15873 if (goal == BOUNDARY_SINGLE_CACHELINE) {
15874 val |= (DMA_RWCTRL_READ_BNDRY_16 |
15875 DMA_RWCTRL_WRITE_BNDRY_16);
15880 if (goal == BOUNDARY_SINGLE_CACHELINE) {
15881 val |= (DMA_RWCTRL_READ_BNDRY_32 |
15882 DMA_RWCTRL_WRITE_BNDRY_32);
15887 if (goal == BOUNDARY_SINGLE_CACHELINE) {
15888 val |= (DMA_RWCTRL_READ_BNDRY_64 |
15889 DMA_RWCTRL_WRITE_BNDRY_64);
15894 if (goal == BOUNDARY_SINGLE_CACHELINE) {
15895 val |= (DMA_RWCTRL_READ_BNDRY_128 |
15896 DMA_RWCTRL_WRITE_BNDRY_128);
15901 val |= (DMA_RWCTRL_READ_BNDRY_256 |
15902 DMA_RWCTRL_WRITE_BNDRY_256);
15905 val |= (DMA_RWCTRL_READ_BNDRY_512 |
15906 DMA_RWCTRL_WRITE_BNDRY_512);
15910 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
15911 DMA_RWCTRL_WRITE_BNDRY_1024);
15920 static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
15921 int size, int to_device)
15923 struct tg3_internal_buffer_desc test_desc;
15924 u32 sram_dma_descs;
15927 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
15929 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
15930 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
15931 tw32(RDMAC_STATUS, 0);
15932 tw32(WDMAC_STATUS, 0);
15934 tw32(BUFMGR_MODE, 0);
15935 tw32(FTQ_RESET, 0);
15937 test_desc.addr_hi = ((u64) buf_dma) >> 32;
15938 test_desc.addr_lo = buf_dma & 0xffffffff;
15939 test_desc.nic_mbuf = 0x00002100;
15940 test_desc.len = size;
15943 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
15944 * the *second* time the tg3 driver was getting loaded after an
15947 * Broadcom tells me:
15948 * ...the DMA engine is connected to the GRC block and a DMA
15949 * reset may affect the GRC block in some unpredictable way...
15950 * The behavior of resets to individual blocks has not been tested.
15952 * Broadcom noted the GRC reset will also reset all sub-components.
15955 test_desc.cqid_sqid = (13 << 8) | 2;
15957 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
15960 test_desc.cqid_sqid = (16 << 8) | 7;
15962 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
15965 test_desc.flags = 0x00000005;
15967 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
15970 val = *(((u32 *)&test_desc) + i);
15971 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
15972 sram_dma_descs + (i * sizeof(u32)));
15973 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
15975 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
15978 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
15980 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
15983 for (i = 0; i < 40; i++) {
15987 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
15989 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
15990 if ((val & 0xffff) == sram_dma_descs) {
16001 #define TEST_BUFFER_SIZE 0x2000
16003 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
16004 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
16008 static int tg3_test_dma(struct tg3 *tp)
16010 dma_addr_t buf_dma;
16011 u32 *buf, saved_dma_rwctrl;
16014 buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
16015 &buf_dma, GFP_KERNEL);
16021 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
16022 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
16024 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
16026 if (tg3_flag(tp, 57765_PLUS))
16029 if (tg3_flag(tp, PCI_EXPRESS)) {
16030 /* DMA read watermark not used on PCIE */
16031 tp->dma_rwctrl |= 0x00180000;
16032 } else if (!tg3_flag(tp, PCIX_MODE)) {
16033 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
16034 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
16035 tp->dma_rwctrl |= 0x003f0000;
16037 tp->dma_rwctrl |= 0x003f000f;
16039 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
16040 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
16041 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
16042 u32 read_water = 0x7;
16044 /* If the 5704 is behind the EPB bridge, we can
16045 * do the less restrictive ONE_DMA workaround for
16046 * better performance.
16048 if (tg3_flag(tp, 40BIT_DMA_BUG) &&
16049 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
16050 tp->dma_rwctrl |= 0x8000;
16051 else if (ccval == 0x6 || ccval == 0x7)
16052 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
16054 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
16056 /* Set bit 23 to enable PCIX hw bug fix */
16058 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
16059 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
16061 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
16062 /* 5780 always in PCIX mode */
16063 tp->dma_rwctrl |= 0x00144000;
16064 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
16065 /* 5714 always in PCIX mode */
16066 tp->dma_rwctrl |= 0x00148000;
16068 tp->dma_rwctrl |= 0x001b000f;
16072 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
16073 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
16074 tp->dma_rwctrl &= 0xfffffff0;
16076 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
16077 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
16078 /* Remove this if it causes problems for some boards. */
16079 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
16081 /* On 5700/5701 chips, we need to set this bit.
16082 * Otherwise the chip will issue cacheline transactions
16083 * to streamable DMA memory with not all the byte
16084 * enables turned on. This is an error on several
16085 * RISC PCI controllers, in particular sparc64.
16087 * On 5703/5704 chips, this bit has been reassigned
16088 * a different meaning. In particular, it is used
16089 * on those chips to enable a PCI-X workaround.
16091 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
16094 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16097 /* Unneeded, already done by tg3_get_invariants. */
16098 tg3_switch_clocks(tp);
16101 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
16102 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
16105 /* It is best to perform DMA test with maximum write burst size
16106 * to expose the 5700/5701 write DMA bug.
16108 saved_dma_rwctrl = tp->dma_rwctrl;
16109 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
16110 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16115 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
16118 /* Send the buffer to the chip. */
16119 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
16121 dev_err(&tp->pdev->dev,
16122 "%s: Buffer write failed. err = %d\n",
16128 /* validate data reached card RAM correctly. */
16129 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
16131 tg3_read_mem(tp, 0x2100 + (i*4), &val);
16132 if (le32_to_cpu(val) != p[i]) {
16133 dev_err(&tp->pdev->dev,
16134 "%s: Buffer corrupted on device! "
16135 "(%d != %d)\n", __func__, val, i);
16136 /* ret = -ENODEV here? */
16141 /* Now read it back. */
16142 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
16144 dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
16145 "err = %d\n", __func__, ret);
16150 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
16154 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
16155 DMA_RWCTRL_WRITE_BNDRY_16) {
16156 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
16157 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
16158 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16161 dev_err(&tp->pdev->dev,
16162 "%s: Buffer corrupted on read back! "
16163 "(%d != %d)\n", __func__, p[i], i);
16169 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
16175 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
16176 DMA_RWCTRL_WRITE_BNDRY_16) {
16177 /* DMA test passed without adjusting DMA boundary,
16178 * now look for chipsets that are known to expose the
16179 * DMA bug without failing the test.
16181 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
16182 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
16183 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
16185 /* Safe to use the calculated DMA boundary. */
16186 tp->dma_rwctrl = saved_dma_rwctrl;
16189 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16193 dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
16198 static void tg3_init_bufmgr_config(struct tg3 *tp)
16200 if (tg3_flag(tp, 57765_PLUS)) {
16201 tp->bufmgr_config.mbuf_read_dma_low_water =
16202 DEFAULT_MB_RDMA_LOW_WATER_5705;
16203 tp->bufmgr_config.mbuf_mac_rx_low_water =
16204 DEFAULT_MB_MACRX_LOW_WATER_57765;
16205 tp->bufmgr_config.mbuf_high_water =
16206 DEFAULT_MB_HIGH_WATER_57765;
16208 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
16209 DEFAULT_MB_RDMA_LOW_WATER_5705;
16210 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
16211 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
16212 tp->bufmgr_config.mbuf_high_water_jumbo =
16213 DEFAULT_MB_HIGH_WATER_JUMBO_57765;
16214 } else if (tg3_flag(tp, 5705_PLUS)) {
16215 tp->bufmgr_config.mbuf_read_dma_low_water =
16216 DEFAULT_MB_RDMA_LOW_WATER_5705;
16217 tp->bufmgr_config.mbuf_mac_rx_low_water =
16218 DEFAULT_MB_MACRX_LOW_WATER_5705;
16219 tp->bufmgr_config.mbuf_high_water =
16220 DEFAULT_MB_HIGH_WATER_5705;
16221 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
16222 tp->bufmgr_config.mbuf_mac_rx_low_water =
16223 DEFAULT_MB_MACRX_LOW_WATER_5906;
16224 tp->bufmgr_config.mbuf_high_water =
16225 DEFAULT_MB_HIGH_WATER_5906;
16228 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
16229 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
16230 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
16231 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
16232 tp->bufmgr_config.mbuf_high_water_jumbo =
16233 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
16235 tp->bufmgr_config.mbuf_read_dma_low_water =
16236 DEFAULT_MB_RDMA_LOW_WATER;
16237 tp->bufmgr_config.mbuf_mac_rx_low_water =
16238 DEFAULT_MB_MACRX_LOW_WATER;
16239 tp->bufmgr_config.mbuf_high_water =
16240 DEFAULT_MB_HIGH_WATER;
16242 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
16243 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
16244 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
16245 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
16246 tp->bufmgr_config.mbuf_high_water_jumbo =
16247 DEFAULT_MB_HIGH_WATER_JUMBO;
16250 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
16251 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
16254 static char *tg3_phy_string(struct tg3 *tp)
16256 switch (tp->phy_id & TG3_PHY_ID_MASK) {
16257 case TG3_PHY_ID_BCM5400: return "5400";
16258 case TG3_PHY_ID_BCM5401: return "5401";
16259 case TG3_PHY_ID_BCM5411: return "5411";
16260 case TG3_PHY_ID_BCM5701: return "5701";
16261 case TG3_PHY_ID_BCM5703: return "5703";
16262 case TG3_PHY_ID_BCM5704: return "5704";
16263 case TG3_PHY_ID_BCM5705: return "5705";
16264 case TG3_PHY_ID_BCM5750: return "5750";
16265 case TG3_PHY_ID_BCM5752: return "5752";
16266 case TG3_PHY_ID_BCM5714: return "5714";
16267 case TG3_PHY_ID_BCM5780: return "5780";
16268 case TG3_PHY_ID_BCM5755: return "5755";
16269 case TG3_PHY_ID_BCM5787: return "5787";
16270 case TG3_PHY_ID_BCM5784: return "5784";
16271 case TG3_PHY_ID_BCM5756: return "5722/5756";
16272 case TG3_PHY_ID_BCM5906: return "5906";
16273 case TG3_PHY_ID_BCM5761: return "5761";
16274 case TG3_PHY_ID_BCM5718C: return "5718C";
16275 case TG3_PHY_ID_BCM5718S: return "5718S";
16276 case TG3_PHY_ID_BCM57765: return "57765";
16277 case TG3_PHY_ID_BCM5719C: return "5719C";
16278 case TG3_PHY_ID_BCM5720C: return "5720C";
16279 case TG3_PHY_ID_BCM5762: return "5762C";
16280 case TG3_PHY_ID_BCM8002: return "8002/serdes";
16281 case 0: return "serdes";
16282 default: return "unknown";
16286 static char *tg3_bus_string(struct tg3 *tp, char *str)
16288 if (tg3_flag(tp, PCI_EXPRESS)) {
16289 strcpy(str, "PCI Express");
16291 } else if (tg3_flag(tp, PCIX_MODE)) {
16292 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
16294 strcpy(str, "PCIX:");
16296 if ((clock_ctrl == 7) ||
16297 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
16298 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
16299 strcat(str, "133MHz");
16300 else if (clock_ctrl == 0)
16301 strcat(str, "33MHz");
16302 else if (clock_ctrl == 2)
16303 strcat(str, "50MHz");
16304 else if (clock_ctrl == 4)
16305 strcat(str, "66MHz");
16306 else if (clock_ctrl == 6)
16307 strcat(str, "100MHz");
16309 strcpy(str, "PCI:");
16310 if (tg3_flag(tp, PCI_HIGH_SPEED))
16311 strcat(str, "66MHz");
16313 strcat(str, "33MHz");
16315 if (tg3_flag(tp, PCI_32BIT))
16316 strcat(str, ":32-bit");
16318 strcat(str, ":64-bit");
16322 static void tg3_init_coal(struct tg3 *tp)
16324 struct ethtool_coalesce *ec = &tp->coal;
16326 memset(ec, 0, sizeof(*ec));
16327 ec->cmd = ETHTOOL_GCOALESCE;
16328 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
16329 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
16330 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
16331 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
16332 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
16333 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
16334 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
16335 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
16336 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
16338 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
16339 HOSTCC_MODE_CLRTICK_TXBD)) {
16340 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
16341 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
16342 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
16343 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
16346 if (tg3_flag(tp, 5705_PLUS)) {
16347 ec->rx_coalesce_usecs_irq = 0;
16348 ec->tx_coalesce_usecs_irq = 0;
16349 ec->stats_block_coalesce_usecs = 0;
16353 static int tg3_init_one(struct pci_dev *pdev,
16354 const struct pci_device_id *ent)
16356 struct net_device *dev;
16358 int i, err, pm_cap;
16359 u32 sndmbx, rcvmbx, intmbx;
16361 u64 dma_mask, persist_dma_mask;
16362 netdev_features_t features = 0;
16364 printk_once(KERN_INFO "%s\n", version);
16366 err = pci_enable_device(pdev);
16368 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
16372 err = pci_request_regions(pdev, DRV_MODULE_NAME);
16374 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
16375 goto err_out_disable_pdev;
16378 pci_set_master(pdev);
16380 /* Find power-management capability. */
16381 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
16383 dev_err(&pdev->dev,
16384 "Cannot find Power Management capability, aborting\n");
16386 goto err_out_free_res;
16389 err = pci_set_power_state(pdev, PCI_D0);
16391 dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
16392 goto err_out_free_res;
16395 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
16398 goto err_out_power_down;
16401 SET_NETDEV_DEV(dev, &pdev->dev);
16403 tp = netdev_priv(dev);
16406 tp->pm_cap = pm_cap;
16407 tp->rx_mode = TG3_DEF_RX_MODE;
16408 tp->tx_mode = TG3_DEF_TX_MODE;
16411 tp->msg_enable = tg3_debug;
16413 tp->msg_enable = TG3_DEF_MSG_ENABLE;
16415 /* The word/byte swap controls here control register access byte
16416 * swapping. DMA data byte swapping is controlled in the GRC_MODE
16419 tp->misc_host_ctrl =
16420 MISC_HOST_CTRL_MASK_PCI_INT |
16421 MISC_HOST_CTRL_WORD_SWAP |
16422 MISC_HOST_CTRL_INDIR_ACCESS |
16423 MISC_HOST_CTRL_PCISTATE_RW;
16425 /* The NONFRM (non-frame) byte/word swap controls take effect
16426 * on descriptor entries, anything which isn't packet data.
16428 * The StrongARM chips on the board (one for tx, one for rx)
16429 * are running in big-endian mode.
16431 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
16432 GRC_MODE_WSWAP_NONFRM_DATA);
16433 #ifdef __BIG_ENDIAN
16434 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
16436 spin_lock_init(&tp->lock);
16437 spin_lock_init(&tp->indirect_lock);
16438 INIT_WORK(&tp->reset_task, tg3_reset_task);
16440 tp->regs = pci_ioremap_bar(pdev, BAR_0);
16442 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
16444 goto err_out_free_dev;
16447 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16448 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
16449 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
16450 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
16451 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
16452 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
16453 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
16454 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
16455 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
16456 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
16457 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
16458 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727) {
16459 tg3_flag_set(tp, ENABLE_APE);
16460 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
16461 if (!tp->aperegs) {
16462 dev_err(&pdev->dev,
16463 "Cannot map APE registers, aborting\n");
16465 goto err_out_iounmap;
16469 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
16470 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
16472 dev->ethtool_ops = &tg3_ethtool_ops;
16473 dev->watchdog_timeo = TG3_TX_TIMEOUT;
16474 dev->netdev_ops = &tg3_netdev_ops;
16475 dev->irq = pdev->irq;
16477 err = tg3_get_invariants(tp, ent);
16479 dev_err(&pdev->dev,
16480 "Problem fetching invariants of chip, aborting\n");
16481 goto err_out_apeunmap;
16484 /* The EPB bridge inside 5714, 5715, and 5780 and any
16485 * device behind the EPB cannot support DMA addresses > 40-bit.
16486 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
16487 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
16488 * do DMA address check in tg3_start_xmit().
16490 if (tg3_flag(tp, IS_5788))
16491 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
16492 else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
16493 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
16494 #ifdef CONFIG_HIGHMEM
16495 dma_mask = DMA_BIT_MASK(64);
16498 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
16500 /* Configure DMA attributes. */
16501 if (dma_mask > DMA_BIT_MASK(32)) {
16502 err = pci_set_dma_mask(pdev, dma_mask);
16504 features |= NETIF_F_HIGHDMA;
16505 err = pci_set_consistent_dma_mask(pdev,
16508 dev_err(&pdev->dev, "Unable to obtain 64 bit "
16509 "DMA for consistent allocations\n");
16510 goto err_out_apeunmap;
16514 if (err || dma_mask == DMA_BIT_MASK(32)) {
16515 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
16517 dev_err(&pdev->dev,
16518 "No usable DMA configuration, aborting\n");
16519 goto err_out_apeunmap;
16523 tg3_init_bufmgr_config(tp);
16525 features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
16527 /* 5700 B0 chips do not support checksumming correctly due
16528 * to hardware bugs.
16530 if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
16531 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
16533 if (tg3_flag(tp, 5755_PLUS))
16534 features |= NETIF_F_IPV6_CSUM;
16537 /* TSO is on by default on chips that support hardware TSO.
16538 * Firmware TSO on older chips gives lower performance, so it
16539 * is off by default, but can be enabled using ethtool.
16541 if ((tg3_flag(tp, HW_TSO_1) ||
16542 tg3_flag(tp, HW_TSO_2) ||
16543 tg3_flag(tp, HW_TSO_3)) &&
16544 (features & NETIF_F_IP_CSUM))
16545 features |= NETIF_F_TSO;
16546 if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
16547 if (features & NETIF_F_IPV6_CSUM)
16548 features |= NETIF_F_TSO6;
16549 if (tg3_flag(tp, HW_TSO_3) ||
16550 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
16551 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
16552 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
16553 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
16554 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
16555 features |= NETIF_F_TSO_ECN;
16558 dev->features |= features;
16559 dev->vlan_features |= features;
16562 * Add loopback capability only for a subset of devices that support
16563 * MAC-LOOPBACK. Eventually this need to be enhanced to allow INT-PHY
16564 * loopback for the remaining devices.
16566 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
16567 !tg3_flag(tp, CPMU_PRESENT))
16568 /* Add the loopback capability */
16569 features |= NETIF_F_LOOPBACK;
16571 dev->hw_features |= features;
16573 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
16574 !tg3_flag(tp, TSO_CAPABLE) &&
16575 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
16576 tg3_flag_set(tp, MAX_RXPEND_64);
16577 tp->rx_pending = 63;
16580 err = tg3_get_device_address(tp);
16582 dev_err(&pdev->dev,
16583 "Could not obtain valid ethernet address, aborting\n");
16584 goto err_out_apeunmap;
16588 * Reset chip in case UNDI or EFI driver did not shutdown
16589 * DMA self test will enable WDMAC and we'll see (spurious)
16590 * pending DMA on the PCI bus at that point.
16592 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
16593 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
16594 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
16595 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
16598 err = tg3_test_dma(tp);
16600 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
16601 goto err_out_apeunmap;
16604 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
16605 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
16606 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
16607 for (i = 0; i < tp->irq_max; i++) {
16608 struct tg3_napi *tnapi = &tp->napi[i];
16611 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
16613 tnapi->int_mbox = intmbx;
16619 tnapi->consmbox = rcvmbx;
16620 tnapi->prodmbox = sndmbx;
16623 tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
16625 tnapi->coal_now = HOSTCC_MODE_NOW;
16627 if (!tg3_flag(tp, SUPPORT_MSIX))
16631 * If we support MSIX, we'll be using RSS. If we're using
16632 * RSS, the first vector only handles link interrupts and the
16633 * remaining vectors handle rx and tx interrupts. Reuse the
16634 * mailbox values for the next iteration. The values we setup
16635 * above are still useful for the single vectored mode.
16650 pci_set_drvdata(pdev, dev);
16652 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
16653 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
16654 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
16655 tg3_flag_set(tp, PTP_CAPABLE);
16657 if (tg3_flag(tp, 5717_PLUS)) {
16658 /* Resume a low-power mode */
16659 tg3_frob_aux_power(tp, false);
16662 tg3_timer_init(tp);
16664 err = register_netdev(dev);
16666 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
16667 goto err_out_apeunmap;
16670 netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
16671 tp->board_part_number,
16672 tp->pci_chip_rev_id,
16673 tg3_bus_string(tp, str),
16676 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
16677 struct phy_device *phydev;
16678 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
16680 "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
16681 phydev->drv->name, dev_name(&phydev->dev));
16685 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
16686 ethtype = "10/100Base-TX";
16687 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
16688 ethtype = "1000Base-SX";
16690 ethtype = "10/100/1000Base-T";
16692 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
16693 "(WireSpeed[%d], EEE[%d])\n",
16694 tg3_phy_string(tp), ethtype,
16695 (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
16696 (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
16699 netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
16700 (dev->features & NETIF_F_RXCSUM) != 0,
16701 tg3_flag(tp, USE_LINKCHG_REG) != 0,
16702 (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
16703 tg3_flag(tp, ENABLE_ASF) != 0,
16704 tg3_flag(tp, TSO_CAPABLE) != 0);
16705 netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
16707 pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
16708 ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
16710 pci_save_state(pdev);
16716 iounmap(tp->aperegs);
16717 tp->aperegs = NULL;
16729 err_out_power_down:
16730 pci_set_power_state(pdev, PCI_D3hot);
16733 pci_release_regions(pdev);
16735 err_out_disable_pdev:
16736 pci_disable_device(pdev);
16737 pci_set_drvdata(pdev, NULL);
16741 static void tg3_remove_one(struct pci_dev *pdev)
16743 struct net_device *dev = pci_get_drvdata(pdev);
16746 struct tg3 *tp = netdev_priv(dev);
16748 release_firmware(tp->fw);
16750 tg3_reset_task_cancel(tp);
16752 if (tg3_flag(tp, USE_PHYLIB)) {
16757 unregister_netdev(dev);
16759 iounmap(tp->aperegs);
16760 tp->aperegs = NULL;
16767 pci_release_regions(pdev);
16768 pci_disable_device(pdev);
16769 pci_set_drvdata(pdev, NULL);
16773 #ifdef CONFIG_PM_SLEEP
16774 static int tg3_suspend(struct device *device)
16776 struct pci_dev *pdev = to_pci_dev(device);
16777 struct net_device *dev = pci_get_drvdata(pdev);
16778 struct tg3 *tp = netdev_priv(dev);
16781 if (!netif_running(dev))
16784 tg3_reset_task_cancel(tp);
16786 tg3_netif_stop(tp);
16788 tg3_timer_stop(tp);
16790 tg3_full_lock(tp, 1);
16791 tg3_disable_ints(tp);
16792 tg3_full_unlock(tp);
16794 netif_device_detach(dev);
16796 tg3_full_lock(tp, 0);
16797 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
16798 tg3_flag_clear(tp, INIT_COMPLETE);
16799 tg3_full_unlock(tp);
16801 err = tg3_power_down_prepare(tp);
16805 tg3_full_lock(tp, 0);
16807 tg3_flag_set(tp, INIT_COMPLETE);
16808 err2 = tg3_restart_hw(tp, 1);
16812 tg3_timer_start(tp);
16814 netif_device_attach(dev);
16815 tg3_netif_start(tp);
16818 tg3_full_unlock(tp);
16827 static int tg3_resume(struct device *device)
16829 struct pci_dev *pdev = to_pci_dev(device);
16830 struct net_device *dev = pci_get_drvdata(pdev);
16831 struct tg3 *tp = netdev_priv(dev);
16834 if (!netif_running(dev))
16837 netif_device_attach(dev);
16839 tg3_full_lock(tp, 0);
16841 tg3_flag_set(tp, INIT_COMPLETE);
16842 err = tg3_restart_hw(tp, 1);
16846 tg3_timer_start(tp);
16848 tg3_netif_start(tp);
16851 tg3_full_unlock(tp);
16859 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
16860 #define TG3_PM_OPS (&tg3_pm_ops)
16864 #define TG3_PM_OPS NULL
16866 #endif /* CONFIG_PM_SLEEP */
16869 * tg3_io_error_detected - called when PCI error is detected
16870 * @pdev: Pointer to PCI device
16871 * @state: The current pci connection state
16873 * This function is called after a PCI bus error affecting
16874 * this device has been detected.
16876 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
16877 pci_channel_state_t state)
16879 struct net_device *netdev = pci_get_drvdata(pdev);
16880 struct tg3 *tp = netdev_priv(netdev);
16881 pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
16883 netdev_info(netdev, "PCI I/O error detected\n");
16887 if (!netif_running(netdev))
16892 tg3_netif_stop(tp);
16894 tg3_timer_stop(tp);
16896 /* Want to make sure that the reset task doesn't run */
16897 tg3_reset_task_cancel(tp);
16899 netif_device_detach(netdev);
16901 /* Clean up software state, even if MMIO is blocked */
16902 tg3_full_lock(tp, 0);
16903 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
16904 tg3_full_unlock(tp);
16907 if (state == pci_channel_io_perm_failure)
16908 err = PCI_ERS_RESULT_DISCONNECT;
16910 pci_disable_device(pdev);
16918 * tg3_io_slot_reset - called after the pci bus has been reset.
16919 * @pdev: Pointer to PCI device
16921 * Restart the card from scratch, as if from a cold-boot.
16922 * At this point, the card has exprienced a hard reset,
16923 * followed by fixups by BIOS, and has its config space
16924 * set up identically to what it was at cold boot.
16926 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
16928 struct net_device *netdev = pci_get_drvdata(pdev);
16929 struct tg3 *tp = netdev_priv(netdev);
16930 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
16935 if (pci_enable_device(pdev)) {
16936 netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
16940 pci_set_master(pdev);
16941 pci_restore_state(pdev);
16942 pci_save_state(pdev);
16944 if (!netif_running(netdev)) {
16945 rc = PCI_ERS_RESULT_RECOVERED;
16949 err = tg3_power_up(tp);
16953 rc = PCI_ERS_RESULT_RECOVERED;
16962 * tg3_io_resume - called when traffic can start flowing again.
16963 * @pdev: Pointer to PCI device
16965 * This callback is called when the error recovery driver tells
16966 * us that its OK to resume normal operation.
16968 static void tg3_io_resume(struct pci_dev *pdev)
16970 struct net_device *netdev = pci_get_drvdata(pdev);
16971 struct tg3 *tp = netdev_priv(netdev);
16976 if (!netif_running(netdev))
16979 tg3_full_lock(tp, 0);
16980 tg3_flag_set(tp, INIT_COMPLETE);
16981 err = tg3_restart_hw(tp, 1);
16983 tg3_full_unlock(tp);
16984 netdev_err(netdev, "Cannot restart hardware after reset.\n");
16988 netif_device_attach(netdev);
16990 tg3_timer_start(tp);
16992 tg3_netif_start(tp);
16994 tg3_full_unlock(tp);
17002 static const struct pci_error_handlers tg3_err_handler = {
17003 .error_detected = tg3_io_error_detected,
17004 .slot_reset = tg3_io_slot_reset,
17005 .resume = tg3_io_resume
17008 static struct pci_driver tg3_driver = {
17009 .name = DRV_MODULE_NAME,
17010 .id_table = tg3_pci_tbl,
17011 .probe = tg3_init_one,
17012 .remove = tg3_remove_one,
17013 .err_handler = &tg3_err_handler,
17014 .driver.pm = TG3_PM_OPS,
17017 static int __init tg3_init(void)
17019 return pci_register_driver(&tg3_driver);
17022 static void __exit tg3_cleanup(void)
17024 pci_unregister_driver(&tg3_driver);
17027 module_init(tg3_init);
17028 module_exit(tg3_cleanup);