/* tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2011 Broadcom Corporation.
 *
 * Derived from proprietary unpublished source code,
 * Copyright (C) 2000-2003 Broadcom Corporation.
 *
 * Permission is hereby granted for the distribution of this firmware
 * data in hexadecimal or equivalent format, provided this copyright
 * notice is accompanying it.
 */
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/stringify.h>
22 #include <linux/kernel.h>
23 #include <linux/types.h>
24 #include <linux/compiler.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
28 #include <linux/init.h>
29 #include <linux/interrupt.h>
30 #include <linux/ioport.h>
31 #include <linux/pci.h>
32 #include <linux/netdevice.h>
33 #include <linux/etherdevice.h>
34 #include <linux/skbuff.h>
35 #include <linux/ethtool.h>
36 #include <linux/mdio.h>
37 #include <linux/mii.h>
38 #include <linux/phy.h>
39 #include <linux/brcmphy.h>
40 #include <linux/if_vlan.h>
42 #include <linux/tcp.h>
43 #include <linux/workqueue.h>
44 #include <linux/prefetch.h>
45 #include <linux/dma-mapping.h>
46 #include <linux/firmware.h>
48 #include <net/checksum.h>
51 #include <asm/system.h>
53 #include <asm/byteorder.h>
54 #include <linux/uaccess.h>
57 #include <asm/idprom.h>
66 /* Functions & macros to verify TG3_FLAGS types */
68 static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
70 return test_bit(flag, bits);
73 static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
78 static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
80 clear_bit(flag, bits);
83 #define tg3_flag(tp, flag) \
84 _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
85 #define tg3_flag_set(tp, flag) \
86 _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
87 #define tg3_flag_clear(tp, flag) \
88 _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
#define DRV_MODULE_NAME		"tg3"
/* NOTE(review): TG3_MAJ_NUM was lost from this copy; restored per the
 * v3.121 upstream driver that DRV_MODULE_VERSION below stringifies.
 */
#define TG3_MAJ_NUM		3
#define TG3_MIN_NUM		121
#define DRV_MODULE_VERSION	\
	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE	"November 2, 2011"

/* Reset kinds passed to the firmware/APE driver-state helpers. */
#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
/* Default netif_msg bitmap when the tg3_debug module param is -1. */
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY	100
/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JMB_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING	100
#define TG3_RSS_INDIR_TBL_SIZE		128

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions. Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode. The driver
 * works around this bug by double copying the packet. This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient. For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path. Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)	((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)	0
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)	((tnapi)->tx_pending / 4)
#define TG3_TX_BD_DMA_MAX		4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_FW_UPDATE_TIMEOUT_SEC	5

/* Firmware image names requested via request_firmware(). */
#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"
212 static char version[] __devinitdata =
213 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
215 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
216 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
217 MODULE_LICENSE("GPL");
218 MODULE_VERSION(DRV_MODULE_VERSION);
219 MODULE_FIRMWARE(FIRMWARE_TG3);
220 MODULE_FIRMWARE(FIRMWARE_TG3TSO);
221 MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
223 static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
224 module_param(tg3_debug, int, 0);
225 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
227 static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
228 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
229 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
230 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
231 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
232 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
233 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
234 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
235 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
236 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
237 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
238 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
239 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
240 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
241 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
242 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
243 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
244 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
245 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
246 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
247 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
248 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
249 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
250 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
251 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
252 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
253 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
254 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
255 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
256 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
257 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
258 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
259 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
260 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
261 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
262 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
263 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
264 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
265 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
266 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
267 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
268 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
269 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
270 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
271 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
272 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
273 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
274 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
275 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
276 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
277 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
278 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
279 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
280 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
281 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
282 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
283 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
284 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
285 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
286 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
287 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
288 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
289 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
290 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
291 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
292 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
293 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
294 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
295 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
296 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
297 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
298 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
299 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
300 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
301 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
302 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
303 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
304 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
305 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
306 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
307 {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
308 {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
312 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
314 static const struct {
315 const char string[ETH_GSTRING_LEN];
316 } ethtool_stats_keys[] = {
319 { "rx_ucast_packets" },
320 { "rx_mcast_packets" },
321 { "rx_bcast_packets" },
323 { "rx_align_errors" },
324 { "rx_xon_pause_rcvd" },
325 { "rx_xoff_pause_rcvd" },
326 { "rx_mac_ctrl_rcvd" },
327 { "rx_xoff_entered" },
328 { "rx_frame_too_long_errors" },
330 { "rx_undersize_packets" },
331 { "rx_in_length_errors" },
332 { "rx_out_length_errors" },
333 { "rx_64_or_less_octet_packets" },
334 { "rx_65_to_127_octet_packets" },
335 { "rx_128_to_255_octet_packets" },
336 { "rx_256_to_511_octet_packets" },
337 { "rx_512_to_1023_octet_packets" },
338 { "rx_1024_to_1522_octet_packets" },
339 { "rx_1523_to_2047_octet_packets" },
340 { "rx_2048_to_4095_octet_packets" },
341 { "rx_4096_to_8191_octet_packets" },
342 { "rx_8192_to_9022_octet_packets" },
349 { "tx_flow_control" },
351 { "tx_single_collisions" },
352 { "tx_mult_collisions" },
354 { "tx_excessive_collisions" },
355 { "tx_late_collisions" },
356 { "tx_collide_2times" },
357 { "tx_collide_3times" },
358 { "tx_collide_4times" },
359 { "tx_collide_5times" },
360 { "tx_collide_6times" },
361 { "tx_collide_7times" },
362 { "tx_collide_8times" },
363 { "tx_collide_9times" },
364 { "tx_collide_10times" },
365 { "tx_collide_11times" },
366 { "tx_collide_12times" },
367 { "tx_collide_13times" },
368 { "tx_collide_14times" },
369 { "tx_collide_15times" },
370 { "tx_ucast_packets" },
371 { "tx_mcast_packets" },
372 { "tx_bcast_packets" },
373 { "tx_carrier_sense_errors" },
377 { "dma_writeq_full" },
378 { "dma_write_prioq_full" },
382 { "rx_threshold_hit" },
384 { "dma_readq_full" },
385 { "dma_read_prioq_full" },
386 { "tx_comp_queue_full" },
388 { "ring_set_send_prod_index" },
389 { "ring_status_update" },
391 { "nic_avoided_irqs" },
392 { "nic_tx_threshold_hit" },
394 { "mbuf_lwm_thresh_hit" },
397 #define TG3_NUM_STATS ARRAY_SIZE(ethtool_stats_keys)
400 static const struct {
401 const char string[ETH_GSTRING_LEN];
402 } ethtool_test_keys[] = {
403 { "nvram test (online) " },
404 { "link test (online) " },
405 { "register test (offline)" },
406 { "memory test (offline)" },
407 { "mac loopback test (offline)" },
408 { "phy loopback test (offline)" },
409 { "ext loopback test (offline)" },
410 { "interrupt test (offline)" },
413 #define TG3_NUM_TEST ARRAY_SIZE(ethtool_test_keys)
416 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
418 writel(val, tp->regs + off);
421 static u32 tg3_read32(struct tg3 *tp, u32 off)
423 return readl(tp->regs + off);
426 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
428 writel(val, tp->aperegs + off);
431 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
433 return readl(tp->aperegs + off);
436 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
440 spin_lock_irqsave(&tp->indirect_lock, flags);
441 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
442 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
443 spin_unlock_irqrestore(&tp->indirect_lock, flags);
446 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
448 writel(val, tp->regs + off);
449 readl(tp->regs + off);
452 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
457 spin_lock_irqsave(&tp->indirect_lock, flags);
458 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
459 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
460 spin_unlock_irqrestore(&tp->indirect_lock, flags);
464 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
468 if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
469 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
470 TG3_64BIT_REG_LOW, val);
473 if (off == TG3_RX_STD_PROD_IDX_REG) {
474 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
475 TG3_64BIT_REG_LOW, val);
479 spin_lock_irqsave(&tp->indirect_lock, flags);
480 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
481 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
482 spin_unlock_irqrestore(&tp->indirect_lock, flags);
484 /* In indirect mode when disabling interrupts, we also need
485 * to clear the interrupt bit in the GRC local ctrl register.
487 if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
489 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
490 tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
494 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
499 spin_lock_irqsave(&tp->indirect_lock, flags);
500 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
501 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
502 spin_unlock_irqrestore(&tp->indirect_lock, flags);
506 /* usec_wait specifies the wait time in usec when writing to certain registers
507 * where it is unsafe to read back the register without some delay.
508 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
509 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
511 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
513 if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
514 /* Non-posted methods */
515 tp->write32(tp, off, val);
518 tg3_write32(tp, off, val);
523 /* Wait again after the read for the posted method to guarantee that
524 * the wait time is met.
530 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
532 tp->write32_mbox(tp, off, val);
533 if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
534 tp->read32_mbox(tp, off);
537 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
539 void __iomem *mbox = tp->regs + off;
541 if (tg3_flag(tp, TXD_MBOX_HWBUG))
543 if (tg3_flag(tp, MBOX_WRITE_REORDER))
547 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
549 return readl(tp->regs + off + GRCMBOX_BASE);
552 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
554 writel(val, tp->regs + off + GRCMBOX_BASE);
/* Shorthand wrappers around the per-chip accessor function pointers;
 * all assume a local variable named 'tp' is in scope.
 */
#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)
568 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
572 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
573 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
576 spin_lock_irqsave(&tp->indirect_lock, flags);
577 if (tg3_flag(tp, SRAM_USE_CONFIG)) {
578 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
579 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
581 /* Always leave this as zero. */
582 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
584 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
585 tw32_f(TG3PCI_MEM_WIN_DATA, val);
587 /* Always leave this as zero. */
588 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
590 spin_unlock_irqrestore(&tp->indirect_lock, flags);
593 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
597 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
598 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
603 spin_lock_irqsave(&tp->indirect_lock, flags);
604 if (tg3_flag(tp, SRAM_USE_CONFIG)) {
605 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
606 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
608 /* Always leave this as zero. */
609 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
611 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
612 *val = tr32(TG3PCI_MEM_WIN_DATA);
614 /* Always leave this as zero. */
615 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
617 spin_unlock_irqrestore(&tp->indirect_lock, flags);
620 static void tg3_ape_lock_init(struct tg3 *tp)
625 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
626 regbase = TG3_APE_LOCK_GRANT;
628 regbase = TG3_APE_PER_LOCK_GRANT;
630 /* Make sure the driver hasn't any stale locks. */
631 for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
633 case TG3_APE_LOCK_PHY0:
634 case TG3_APE_LOCK_PHY1:
635 case TG3_APE_LOCK_PHY2:
636 case TG3_APE_LOCK_PHY3:
637 bit = APE_LOCK_GRANT_DRIVER;
641 bit = APE_LOCK_GRANT_DRIVER;
643 bit = 1 << tp->pci_fn;
645 tg3_ape_write32(tp, regbase + 4 * i, bit);
650 static int tg3_ape_lock(struct tg3 *tp, int locknum)
654 u32 status, req, gnt, bit;
656 if (!tg3_flag(tp, ENABLE_APE))
660 case TG3_APE_LOCK_GPIO:
661 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
663 case TG3_APE_LOCK_GRC:
664 case TG3_APE_LOCK_MEM:
666 bit = APE_LOCK_REQ_DRIVER;
668 bit = 1 << tp->pci_fn;
674 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
675 req = TG3_APE_LOCK_REQ;
676 gnt = TG3_APE_LOCK_GRANT;
678 req = TG3_APE_PER_LOCK_REQ;
679 gnt = TG3_APE_PER_LOCK_GRANT;
684 tg3_ape_write32(tp, req + off, bit);
686 /* Wait for up to 1 millisecond to acquire lock. */
687 for (i = 0; i < 100; i++) {
688 status = tg3_ape_read32(tp, gnt + off);
695 /* Revoke the lock request. */
696 tg3_ape_write32(tp, gnt + off, bit);
703 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
707 if (!tg3_flag(tp, ENABLE_APE))
711 case TG3_APE_LOCK_GPIO:
712 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
714 case TG3_APE_LOCK_GRC:
715 case TG3_APE_LOCK_MEM:
717 bit = APE_LOCK_GRANT_DRIVER;
719 bit = 1 << tp->pci_fn;
725 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
726 gnt = TG3_APE_LOCK_GRANT;
728 gnt = TG3_APE_PER_LOCK_GRANT;
730 tg3_ape_write32(tp, gnt + 4 * locknum, bit);
733 static void tg3_ape_send_event(struct tg3 *tp, u32 event)
738 /* NCSI does not support APE events */
739 if (tg3_flag(tp, APE_HAS_NCSI))
742 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
743 if (apedata != APE_SEG_SIG_MAGIC)
746 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
747 if (!(apedata & APE_FW_STATUS_READY))
750 /* Wait for up to 1 millisecond for APE to service previous event. */
751 for (i = 0; i < 10; i++) {
752 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
755 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
757 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
758 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
759 event | APE_EVENT_STATUS_EVENT_PENDING);
761 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
763 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
769 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
770 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
773 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
778 if (!tg3_flag(tp, ENABLE_APE))
782 case RESET_KIND_INIT:
783 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
784 APE_HOST_SEG_SIG_MAGIC);
785 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
786 APE_HOST_SEG_LEN_MAGIC);
787 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
788 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
789 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
790 APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
791 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
792 APE_HOST_BEHAV_NO_PHYLOCK);
793 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
794 TG3_APE_HOST_DRVR_STATE_START);
796 event = APE_EVENT_STATUS_STATE_START;
798 case RESET_KIND_SHUTDOWN:
799 /* With the interface we are currently using,
800 * APE does not track driver state. Wiping
801 * out the HOST SEGMENT SIGNATURE forces
802 * the APE to assume OS absent status.
804 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
806 if (device_may_wakeup(&tp->pdev->dev) &&
807 tg3_flag(tp, WOL_ENABLE)) {
808 tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
809 TG3_APE_HOST_WOL_SPEED_AUTO);
810 apedata = TG3_APE_HOST_DRVR_STATE_WOL;
812 apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;
814 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);
816 event = APE_EVENT_STATUS_STATE_UNLOAD;
818 case RESET_KIND_SUSPEND:
819 event = APE_EVENT_STATUS_STATE_SUSPEND;
825 event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
827 tg3_ape_send_event(tp, event);
830 static void tg3_disable_ints(struct tg3 *tp)
834 tw32(TG3PCI_MISC_HOST_CTRL,
835 (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
836 for (i = 0; i < tp->irq_max; i++)
837 tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
840 static void tg3_enable_ints(struct tg3 *tp)
847 tw32(TG3PCI_MISC_HOST_CTRL,
848 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
850 tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
851 for (i = 0; i < tp->irq_cnt; i++) {
852 struct tg3_napi *tnapi = &tp->napi[i];
854 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
855 if (tg3_flag(tp, 1SHOT_MSI))
856 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
858 tp->coal_now |= tnapi->coal_now;
861 /* Force an initial interrupt */
862 if (!tg3_flag(tp, TAGGED_STATUS) &&
863 (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
864 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
866 tw32(HOSTCC_MODE, tp->coal_now);
868 tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
871 static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
873 struct tg3 *tp = tnapi->tp;
874 struct tg3_hw_status *sblk = tnapi->hw_status;
875 unsigned int work_exists = 0;
877 /* check for phy events */
878 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
879 if (sblk->status & SD_STATUS_LINK_CHG)
883 /* check for TX work to do */
884 if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
887 /* check for RX work to do */
888 if (tnapi->rx_rcb_prod_idx &&
889 *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
896 * similar to tg3_enable_ints, but it accurately determines whether there
897 * is new work pending and can return without flushing the PIO write
898 * which reenables interrupts
900 static void tg3_int_reenable(struct tg3_napi *tnapi)
902 struct tg3 *tp = tnapi->tp;
904 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
907 /* When doing tagged status, this work check is unnecessary.
908 * The last_tag we write above tells the chip which piece of
909 * work we've completed.
911 if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
912 tw32(HOSTCC_MODE, tp->coalesce_mode |
913 HOSTCC_MODE_ENABLE | tnapi->coal_now);
916 static void tg3_switch_clocks(struct tg3 *tp)
921 if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
924 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
926 orig_clock_ctrl = clock_ctrl;
927 clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
928 CLOCK_CTRL_CLKRUN_OENABLE |
930 tp->pci_clock_ctrl = clock_ctrl;
932 if (tg3_flag(tp, 5705_PLUS)) {
933 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
934 tw32_wait_f(TG3PCI_CLOCK_CTRL,
935 clock_ctrl | CLOCK_CTRL_625_CORE, 40);
937 } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
938 tw32_wait_f(TG3PCI_CLOCK_CTRL,
940 (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
942 tw32_wait_f(TG3PCI_CLOCK_CTRL,
943 clock_ctrl | (CLOCK_CTRL_ALTCLK),
946 tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
949 #define PHY_BUSY_LOOPS 5000
951 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
957 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
959 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
965 frame_val = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
966 MI_COM_PHY_ADDR_MASK);
967 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
968 MI_COM_REG_ADDR_MASK);
969 frame_val |= (MI_COM_CMD_READ | MI_COM_START);
971 tw32_f(MAC_MI_COM, frame_val);
973 loops = PHY_BUSY_LOOPS;
976 frame_val = tr32(MAC_MI_COM);
978 if ((frame_val & MI_COM_BUSY) == 0) {
980 frame_val = tr32(MAC_MI_COM);
988 *val = frame_val & MI_COM_DATA_MASK;
992 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
993 tw32_f(MAC_MI_MODE, tp->mi_mode);
1000 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
1006 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
1007 (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
1010 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1012 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
1016 frame_val = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
1017 MI_COM_PHY_ADDR_MASK);
1018 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
1019 MI_COM_REG_ADDR_MASK);
1020 frame_val |= (val & MI_COM_DATA_MASK);
1021 frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
1023 tw32_f(MAC_MI_COM, frame_val);
1025 loops = PHY_BUSY_LOOPS;
1026 while (loops != 0) {
1028 frame_val = tr32(MAC_MI_COM);
1029 if ((frame_val & MI_COM_BUSY) == 0) {
1031 frame_val = tr32(MAC_MI_COM);
1041 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1042 tw32_f(MAC_MI_MODE, tp->mi_mode);
1049 static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
1053 err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1057 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1061 err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1062 MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1066 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
1072 static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
1076 err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1080 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1084 err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1085 MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1089 err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
1095 static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
1099 err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1101 err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
1106 static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1110 err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1112 err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1117 static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
1121 err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
1122 (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
1123 MII_TG3_AUXCTL_SHDWSEL_MISC);
1125 err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
1130 static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
1132 if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
1133 set |= MII_TG3_AUXCTL_MISC_WREN;
1135 return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
1138 static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
1143 err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
1149 val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1151 val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1153 err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
1154 val | MII_TG3_AUXCTL_ACTL_TX_6DB);
1159 static int tg3_bmcr_reset(struct tg3 *tp)
1164 /* OK, reset it, and poll the BMCR_RESET bit until it
1165 * clears or we time out.
1167 phy_control = BMCR_RESET;
1168 err = tg3_writephy(tp, MII_BMCR, phy_control);
1174 err = tg3_readphy(tp, MII_BMCR, &phy_control);
1178 if ((phy_control & BMCR_RESET) == 0) {
1190 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
1192 struct tg3 *tp = bp->priv;
1195 spin_lock_bh(&tp->lock);
1197 if (tg3_readphy(tp, reg, &val))
1200 spin_unlock_bh(&tp->lock);
1205 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1207 struct tg3 *tp = bp->priv;
1210 spin_lock_bh(&tp->lock);
1212 if (tg3_writephy(tp, reg, val))
1215 spin_unlock_bh(&tp->lock);
1220 static int tg3_mdio_reset(struct mii_bus *bp)
1225 static void tg3_mdio_config_5785(struct tg3 *tp)
1228 struct phy_device *phydev;
1230 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1231 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1232 case PHY_ID_BCM50610:
1233 case PHY_ID_BCM50610M:
1234 val = MAC_PHYCFG2_50610_LED_MODES;
1236 case PHY_ID_BCMAC131:
1237 val = MAC_PHYCFG2_AC131_LED_MODES;
1239 case PHY_ID_RTL8211C:
1240 val = MAC_PHYCFG2_RTL8211C_LED_MODES;
1242 case PHY_ID_RTL8201E:
1243 val = MAC_PHYCFG2_RTL8201E_LED_MODES;
1249 if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
1250 tw32(MAC_PHYCFG2, val);
1252 val = tr32(MAC_PHYCFG1);
1253 val &= ~(MAC_PHYCFG1_RGMII_INT |
1254 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
1255 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
1256 tw32(MAC_PHYCFG1, val);
1261 if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
1262 val |= MAC_PHYCFG2_EMODE_MASK_MASK |
1263 MAC_PHYCFG2_FMODE_MASK_MASK |
1264 MAC_PHYCFG2_GMODE_MASK_MASK |
1265 MAC_PHYCFG2_ACT_MASK_MASK |
1266 MAC_PHYCFG2_QUAL_MASK_MASK |
1267 MAC_PHYCFG2_INBAND_ENABLE;
1269 tw32(MAC_PHYCFG2, val);
1271 val = tr32(MAC_PHYCFG1);
1272 val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
1273 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
1274 if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1275 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1276 val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
1277 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1278 val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
1280 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
1281 MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
1282 tw32(MAC_PHYCFG1, val);
1284 val = tr32(MAC_EXT_RGMII_MODE);
1285 val &= ~(MAC_RGMII_MODE_RX_INT_B |
1286 MAC_RGMII_MODE_RX_QUALITY |
1287 MAC_RGMII_MODE_RX_ACTIVITY |
1288 MAC_RGMII_MODE_RX_ENG_DET |
1289 MAC_RGMII_MODE_TX_ENABLE |
1290 MAC_RGMII_MODE_TX_LOWPWR |
1291 MAC_RGMII_MODE_TX_RESET);
1292 if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1293 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1294 val |= MAC_RGMII_MODE_RX_INT_B |
1295 MAC_RGMII_MODE_RX_QUALITY |
1296 MAC_RGMII_MODE_RX_ACTIVITY |
1297 MAC_RGMII_MODE_RX_ENG_DET;
1298 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1299 val |= MAC_RGMII_MODE_TX_ENABLE |
1300 MAC_RGMII_MODE_TX_LOWPWR |
1301 MAC_RGMII_MODE_TX_RESET;
1303 tw32(MAC_EXT_RGMII_MODE, val);
1306 static void tg3_mdio_start(struct tg3 *tp)
1308 tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
1309 tw32_f(MAC_MI_MODE, tp->mi_mode);
1312 if (tg3_flag(tp, MDIOBUS_INITED) &&
1313 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
1314 tg3_mdio_config_5785(tp);
1317 static int tg3_mdio_init(struct tg3 *tp)
1321 struct phy_device *phydev;
1323 if (tg3_flag(tp, 5717_PLUS)) {
1326 tp->phy_addr = tp->pci_fn + 1;
1328 if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
1329 is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
1331 is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
1332 TG3_CPMU_PHY_STRAP_IS_SERDES;
1336 tp->phy_addr = TG3_PHY_MII_ADDR;
1340 if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
1343 tp->mdio_bus = mdiobus_alloc();
1344 if (tp->mdio_bus == NULL)
1347 tp->mdio_bus->name = "tg3 mdio bus";
1348 snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
1349 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
1350 tp->mdio_bus->priv = tp;
1351 tp->mdio_bus->parent = &tp->pdev->dev;
1352 tp->mdio_bus->read = &tg3_mdio_read;
1353 tp->mdio_bus->write = &tg3_mdio_write;
1354 tp->mdio_bus->reset = &tg3_mdio_reset;
1355 tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
1356 tp->mdio_bus->irq = &tp->mdio_irq[0];
1358 for (i = 0; i < PHY_MAX_ADDR; i++)
1359 tp->mdio_bus->irq[i] = PHY_POLL;
1361 /* The bus registration will look for all the PHYs on the mdio bus.
1362 * Unfortunately, it does not ensure the PHY is powered up before
1363 * accessing the PHY ID registers. A chip reset is the
1364 * quickest way to bring the device back to an operational state..
1366 if (tg3_readphy(tp, MII_BMCR, ®) || (reg & BMCR_PDOWN))
1369 i = mdiobus_register(tp->mdio_bus);
1371 dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
1372 mdiobus_free(tp->mdio_bus);
1376 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1378 if (!phydev || !phydev->drv) {
1379 dev_warn(&tp->pdev->dev, "No PHY devices\n");
1380 mdiobus_unregister(tp->mdio_bus);
1381 mdiobus_free(tp->mdio_bus);
1385 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1386 case PHY_ID_BCM57780:
1387 phydev->interface = PHY_INTERFACE_MODE_GMII;
1388 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1390 case PHY_ID_BCM50610:
1391 case PHY_ID_BCM50610M:
1392 phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
1393 PHY_BRCM_RX_REFCLK_UNUSED |
1394 PHY_BRCM_DIS_TXCRXC_NOENRGY |
1395 PHY_BRCM_AUTO_PWRDWN_ENABLE;
1396 if (tg3_flag(tp, RGMII_INBAND_DISABLE))
1397 phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
1398 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1399 phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
1400 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1401 phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
1403 case PHY_ID_RTL8211C:
1404 phydev->interface = PHY_INTERFACE_MODE_RGMII;
1406 case PHY_ID_RTL8201E:
1407 case PHY_ID_BCMAC131:
1408 phydev->interface = PHY_INTERFACE_MODE_MII;
1409 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1410 tp->phy_flags |= TG3_PHYFLG_IS_FET;
1414 tg3_flag_set(tp, MDIOBUS_INITED);
1416 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
1417 tg3_mdio_config_5785(tp);
1422 static void tg3_mdio_fini(struct tg3 *tp)
1424 if (tg3_flag(tp, MDIOBUS_INITED)) {
1425 tg3_flag_clear(tp, MDIOBUS_INITED);
1426 mdiobus_unregister(tp->mdio_bus);
1427 mdiobus_free(tp->mdio_bus);
1431 /* tp->lock is held. */
1432 static inline void tg3_generate_fw_event(struct tg3 *tp)
1436 val = tr32(GRC_RX_CPU_EVENT);
1437 val |= GRC_RX_CPU_DRIVER_EVENT;
1438 tw32_f(GRC_RX_CPU_EVENT, val);
1440 tp->last_event_jiffies = jiffies;
1443 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1445 /* tp->lock is held. */
1446 static void tg3_wait_for_event_ack(struct tg3 *tp)
1449 unsigned int delay_cnt;
1452 /* If enough time has passed, no wait is necessary. */
1453 time_remain = (long)(tp->last_event_jiffies + 1 +
1454 usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1456 if (time_remain < 0)
1459 /* Check if we can shorten the wait time. */
1460 delay_cnt = jiffies_to_usecs(time_remain);
1461 if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1462 delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
1463 delay_cnt = (delay_cnt >> 3) + 1;
1465 for (i = 0; i < delay_cnt; i++) {
1466 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1472 /* tp->lock is held. */
1473 static void tg3_ump_link_report(struct tg3 *tp)
1478 if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1481 tg3_wait_for_event_ack(tp);
1483 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1485 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1488 if (!tg3_readphy(tp, MII_BMCR, ®))
1490 if (!tg3_readphy(tp, MII_BMSR, ®))
1491 val |= (reg & 0xffff);
1492 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);
1495 if (!tg3_readphy(tp, MII_ADVERTISE, ®))
1497 if (!tg3_readphy(tp, MII_LPA, ®))
1498 val |= (reg & 0xffff);
1499 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);
1502 if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1503 if (!tg3_readphy(tp, MII_CTRL1000, ®))
1505 if (!tg3_readphy(tp, MII_STAT1000, ®))
1506 val |= (reg & 0xffff);
1508 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);
1510 if (!tg3_readphy(tp, MII_PHYADDR, ®))
1514 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);
1516 tg3_generate_fw_event(tp);
1519 /* tp->lock is held. */
1520 static void tg3_stop_fw(struct tg3 *tp)
1522 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1523 /* Wait for RX cpu to ACK the previous event. */
1524 tg3_wait_for_event_ack(tp);
1526 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1528 tg3_generate_fw_event(tp);
1530 /* Wait for RX cpu to ACK this event. */
1531 tg3_wait_for_event_ack(tp);
1535 /* tp->lock is held. */
1536 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1538 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1539 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1541 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1543 case RESET_KIND_INIT:
1544 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1548 case RESET_KIND_SHUTDOWN:
1549 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1553 case RESET_KIND_SUSPEND:
1554 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1563 if (kind == RESET_KIND_INIT ||
1564 kind == RESET_KIND_SUSPEND)
1565 tg3_ape_driver_state_change(tp, kind);
1568 /* tp->lock is held. */
1569 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1571 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1573 case RESET_KIND_INIT:
1574 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1575 DRV_STATE_START_DONE);
1578 case RESET_KIND_SHUTDOWN:
1579 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1580 DRV_STATE_UNLOAD_DONE);
1588 if (kind == RESET_KIND_SHUTDOWN)
1589 tg3_ape_driver_state_change(tp, kind);
1592 /* tp->lock is held. */
1593 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1595 if (tg3_flag(tp, ENABLE_ASF)) {
1597 case RESET_KIND_INIT:
1598 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1602 case RESET_KIND_SHUTDOWN:
1603 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1607 case RESET_KIND_SUSPEND:
1608 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1618 static int tg3_poll_fw(struct tg3 *tp)
1623 if (tg3_flag(tp, NO_FWARE_REPORTED))
1626 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1627 /* Wait up to 20ms for init done. */
1628 for (i = 0; i < 200; i++) {
1629 if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1636 /* Wait for firmware initialization to complete. */
1637 for (i = 0; i < 100000; i++) {
1638 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1639 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1644 /* Chip might not be fitted with firmware. Some Sun onboard
1645 * parts are configured like that. So don't signal the timeout
1646 * of the above loop as an error, but do report the lack of
1647 * running firmware once.
1649 if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1650 tg3_flag_set(tp, NO_FWARE_REPORTED);
1652 netdev_info(tp->dev, "No firmware running\n");
1655 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
1656 /* The 57765 A0 needs a little more
1657 * time to do some important work.
1665 static void tg3_link_report(struct tg3 *tp)
1667 if (!netif_carrier_ok(tp->dev)) {
1668 netif_info(tp, link, tp->dev, "Link is down\n");
1669 tg3_ump_link_report(tp);
1670 } else if (netif_msg_link(tp)) {
1671 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1672 (tp->link_config.active_speed == SPEED_1000 ?
1674 (tp->link_config.active_speed == SPEED_100 ?
1676 (tp->link_config.active_duplex == DUPLEX_FULL ?
1679 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1680 (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1682 (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1685 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1686 netdev_info(tp->dev, "EEE is %s\n",
1687 tp->setlpicnt ? "enabled" : "disabled");
1689 tg3_ump_link_report(tp);
1693 static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
1697 if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1698 miireg = ADVERTISE_PAUSE_CAP;
1699 else if (flow_ctrl & FLOW_CTRL_TX)
1700 miireg = ADVERTISE_PAUSE_ASYM;
1701 else if (flow_ctrl & FLOW_CTRL_RX)
1702 miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1709 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1713 if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1714 miireg = ADVERTISE_1000XPAUSE;
1715 else if (flow_ctrl & FLOW_CTRL_TX)
1716 miireg = ADVERTISE_1000XPSE_ASYM;
1717 else if (flow_ctrl & FLOW_CTRL_RX)
1718 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1725 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1729 if (lcladv & ADVERTISE_1000XPAUSE) {
1730 if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1731 if (rmtadv & LPA_1000XPAUSE)
1732 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1733 else if (rmtadv & LPA_1000XPAUSE_ASYM)
1736 if (rmtadv & LPA_1000XPAUSE)
1737 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1739 } else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1740 if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
1747 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1751 u32 old_rx_mode = tp->rx_mode;
1752 u32 old_tx_mode = tp->tx_mode;
1754 if (tg3_flag(tp, USE_PHYLIB))
1755 autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
1757 autoneg = tp->link_config.autoneg;
1759 if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1760 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1761 flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1763 flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1765 flowctrl = tp->link_config.flowctrl;
1767 tp->link_config.active_flowctrl = flowctrl;
1769 if (flowctrl & FLOW_CTRL_RX)
1770 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1772 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1774 if (old_rx_mode != tp->rx_mode)
1775 tw32_f(MAC_RX_MODE, tp->rx_mode);
1777 if (flowctrl & FLOW_CTRL_TX)
1778 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1780 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1782 if (old_tx_mode != tp->tx_mode)
1783 tw32_f(MAC_TX_MODE, tp->tx_mode);
1786 static void tg3_adjust_link(struct net_device *dev)
1788 u8 oldflowctrl, linkmesg = 0;
1789 u32 mac_mode, lcl_adv, rmt_adv;
1790 struct tg3 *tp = netdev_priv(dev);
1791 struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1793 spin_lock_bh(&tp->lock);
1795 mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1796 MAC_MODE_HALF_DUPLEX);
1798 oldflowctrl = tp->link_config.active_flowctrl;
1804 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
1805 mac_mode |= MAC_MODE_PORT_MODE_MII;
1806 else if (phydev->speed == SPEED_1000 ||
1807 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
1808 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1810 mac_mode |= MAC_MODE_PORT_MODE_MII;
1812 if (phydev->duplex == DUPLEX_HALF)
1813 mac_mode |= MAC_MODE_HALF_DUPLEX;
1815 lcl_adv = tg3_advert_flowctrl_1000T(
1816 tp->link_config.flowctrl);
1819 rmt_adv = LPA_PAUSE_CAP;
1820 if (phydev->asym_pause)
1821 rmt_adv |= LPA_PAUSE_ASYM;
1824 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
1826 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1828 if (mac_mode != tp->mac_mode) {
1829 tp->mac_mode = mac_mode;
1830 tw32_f(MAC_MODE, tp->mac_mode);
1834 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
1835 if (phydev->speed == SPEED_10)
1837 MAC_MI_STAT_10MBPS_MODE |
1838 MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1840 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1843 if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
1844 tw32(MAC_TX_LENGTHS,
1845 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1846 (6 << TX_LENGTHS_IPG_SHIFT) |
1847 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
1849 tw32(MAC_TX_LENGTHS,
1850 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1851 (6 << TX_LENGTHS_IPG_SHIFT) |
1852 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
1854 if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
1855 (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
1856 phydev->speed != tp->link_config.active_speed ||
1857 phydev->duplex != tp->link_config.active_duplex ||
1858 oldflowctrl != tp->link_config.active_flowctrl)
1861 tp->link_config.active_speed = phydev->speed;
1862 tp->link_config.active_duplex = phydev->duplex;
1864 spin_unlock_bh(&tp->lock);
1867 tg3_link_report(tp);
1870 static int tg3_phy_init(struct tg3 *tp)
1872 struct phy_device *phydev;
1874 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
1877 /* Bring the PHY back to a known state. */
1880 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1882 /* Attach the MAC to the PHY. */
1883 phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
1884 phydev->dev_flags, phydev->interface);
1885 if (IS_ERR(phydev)) {
1886 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
1887 return PTR_ERR(phydev);
1890 /* Mask with MAC supported features. */
1891 switch (phydev->interface) {
1892 case PHY_INTERFACE_MODE_GMII:
1893 case PHY_INTERFACE_MODE_RGMII:
1894 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
1895 phydev->supported &= (PHY_GBIT_FEATURES |
1897 SUPPORTED_Asym_Pause);
1901 case PHY_INTERFACE_MODE_MII:
1902 phydev->supported &= (PHY_BASIC_FEATURES |
1904 SUPPORTED_Asym_Pause);
1907 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1911 tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
1913 phydev->advertising = phydev->supported;
1918 static void tg3_phy_start(struct tg3 *tp)
1920 struct phy_device *phydev;
1922 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1925 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1927 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
1928 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
1929 phydev->speed = tp->link_config.orig_speed;
1930 phydev->duplex = tp->link_config.orig_duplex;
1931 phydev->autoneg = tp->link_config.orig_autoneg;
1932 phydev->advertising = tp->link_config.orig_advertising;
1937 phy_start_aneg(phydev);
1940 static void tg3_phy_stop(struct tg3 *tp)
1942 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1945 phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1948 static void tg3_phy_fini(struct tg3 *tp)
1950 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
1951 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1952 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
1956 static int tg3_phy_set_extloopbk(struct tg3 *tp)
1961 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
1964 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
1965 /* Cannot do read-modify-write on 5401 */
1966 err = tg3_phy_auxctl_write(tp,
1967 MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
1968 MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
1973 err = tg3_phy_auxctl_read(tp,
1974 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
1978 val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
1979 err = tg3_phy_auxctl_write(tp,
1980 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
1986 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
1990 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
1993 tg3_writephy(tp, MII_TG3_FET_TEST,
1994 phytest | MII_TG3_FET_SHADOW_EN);
1995 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
1997 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
1999 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2000 tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2002 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2006 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
2010 if (!tg3_flag(tp, 5705_PLUS) ||
2011 (tg3_flag(tp, 5717_PLUS) &&
2012 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2015 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2016 tg3_phy_fet_toggle_apd(tp, enable);
2020 reg = MII_TG3_MISC_SHDW_WREN |
2021 MII_TG3_MISC_SHDW_SCR5_SEL |
2022 MII_TG3_MISC_SHDW_SCR5_LPED |
2023 MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2024 MII_TG3_MISC_SHDW_SCR5_SDTL |
2025 MII_TG3_MISC_SHDW_SCR5_C125OE;
2026 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
2027 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2029 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2032 reg = MII_TG3_MISC_SHDW_WREN |
2033 MII_TG3_MISC_SHDW_APD_SEL |
2034 MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2036 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2038 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2041 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
2045 if (!tg3_flag(tp, 5705_PLUS) ||
2046 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2049 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2052 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2053 u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2055 tg3_writephy(tp, MII_TG3_FET_TEST,
2056 ephy | MII_TG3_FET_SHADOW_EN);
2057 if (!tg3_readphy(tp, reg, &phy)) {
2059 phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2061 phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2062 tg3_writephy(tp, reg, phy);
2064 tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2069 ret = tg3_phy_auxctl_read(tp,
2070 MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2073 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2075 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2076 tg3_phy_auxctl_write(tp,
2077 MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2082 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2087 if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2090 ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2092 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2093 val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2096 static void tg3_phy_apply_otp(struct tg3 *tp)
2105 if (tg3_phy_toggle_auxctl_smdsp(tp, true))
2108 phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2109 phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2110 tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2112 phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2113 ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2114 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2116 phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2117 phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2118 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2120 phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2121 tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2123 phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2124 tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2126 phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2127 ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2128 tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2130 tg3_phy_toggle_auxctl_smdsp(tp, false);
2133 static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
2137 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2142 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2143 current_link_up == 1 &&
2144 tp->link_config.active_duplex == DUPLEX_FULL &&
2145 (tp->link_config.active_speed == SPEED_100 ||
2146 tp->link_config.active_speed == SPEED_1000)) {
2149 if (tp->link_config.active_speed == SPEED_1000)
2150 eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2152 eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2154 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2156 tg3_phy_cl45_read(tp, MDIO_MMD_AN,
2157 TG3_CL45_D7_EEERES_STAT, &val);
2159 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2160 val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
2164 if (!tp->setlpicnt) {
2165 if (current_link_up == 1 &&
2166 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2167 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2168 tg3_phy_toggle_auxctl_smdsp(tp, false);
2171 val = tr32(TG3_CPMU_EEE_MODE);
2172 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2176 static void tg3_phy_eee_enable(struct tg3 *tp)
2180 if (tp->link_config.active_speed == SPEED_1000 &&
2181 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2182 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2183 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) &&
2184 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2185 val = MII_TG3_DSP_TAP26_ALNOKO |
2186 MII_TG3_DSP_TAP26_RMRXSTO;
2187 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2188 tg3_phy_toggle_auxctl_smdsp(tp, false);
2191 val = tr32(TG3_CPMU_EEE_MODE);
2192 tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2195 static int tg3_wait_macro_done(struct tg3 *tp)
2202 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2203 if ((tmp32 & 0x1000) == 0)
2213 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2215 static const u32 test_pat[4][6] = {
2216 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2217 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2218 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2219 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2223 for (chan = 0; chan < 4; chan++) {
2226 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2227 (chan * 0x2000) | 0x0200);
2228 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2230 for (i = 0; i < 6; i++)
2231 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2234 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2235 if (tg3_wait_macro_done(tp)) {
2240 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2241 (chan * 0x2000) | 0x0200);
2242 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2243 if (tg3_wait_macro_done(tp)) {
2248 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2249 if (tg3_wait_macro_done(tp)) {
2254 for (i = 0; i < 6; i += 2) {
2257 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2258 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2259 tg3_wait_macro_done(tp)) {
2265 if (low != test_pat[chan][i] ||
2266 high != test_pat[chan][i+1]) {
2267 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2268 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2269 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2279 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2283 for (chan = 0; chan < 4; chan++) {
2286 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2287 (chan * 0x2000) | 0x0200);
2288 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2289 for (i = 0; i < 6; i++)
2290 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2291 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2292 if (tg3_wait_macro_done(tp))
2299 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2301 u32 reg32, phy9_orig;
2302 int retries, do_phy_reset, err;
2308 err = tg3_bmcr_reset(tp);
2314 /* Disable transmitter and interrupt. */
2315 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, ®32))
2319 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2321 /* Set full-duplex, 1000 mbps. */
2322 tg3_writephy(tp, MII_BMCR,
2323 BMCR_FULLDPLX | BMCR_SPEED1000);
2325 /* Set to master mode. */
2326 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2329 tg3_writephy(tp, MII_CTRL1000,
2330 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2332 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2336 /* Block the PHY control access. */
2337 tg3_phydsp_write(tp, 0x8005, 0x0800);
2339 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2342 } while (--retries);
2344 err = tg3_phy_reset_chanpat(tp);
2348 tg3_phydsp_write(tp, 0x8005, 0x0000);
2350 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2351 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2353 tg3_phy_toggle_auxctl_smdsp(tp, false);
2355 tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2357 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, ®32)) {
2359 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2366 /* This will reset the tigon3 PHY if there is no valid
2367 * link unless the FORCE argument is non-zero.
2369 static int tg3_phy_reset(struct tg3 *tp)
2374 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2375 val = tr32(GRC_MISC_CFG);
2376 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2379 err = tg3_readphy(tp, MII_BMSR, &val);
2380 err |= tg3_readphy(tp, MII_BMSR, &val);
2384 if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
2385 netif_carrier_off(tp->dev);
2386 tg3_link_report(tp);
2389 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2390 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2391 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
2392 err = tg3_phy_reset_5703_4_5(tp);
2399 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
2400 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
2401 cpmuctrl = tr32(TG3_CPMU_CTRL);
2402 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2404 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2407 err = tg3_bmcr_reset(tp);
2411 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2412 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2413 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2415 tw32(TG3_CPMU_CTRL, cpmuctrl);
2418 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2419 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2420 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2421 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2422 CPMU_LSPD_1000MB_MACCLK_12_5) {
2423 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2425 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2429 if (tg3_flag(tp, 5717_PLUS) &&
2430 (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2433 tg3_phy_apply_otp(tp);
2435 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2436 tg3_phy_toggle_apd(tp, true);
2438 tg3_phy_toggle_apd(tp, false);
2441 if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2442 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2443 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2444 tg3_phydsp_write(tp, 0x000a, 0x0323);
2445 tg3_phy_toggle_auxctl_smdsp(tp, false);
2448 if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2449 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2450 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2453 if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2454 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2455 tg3_phydsp_write(tp, 0x000a, 0x310b);
2456 tg3_phydsp_write(tp, 0x201f, 0x9506);
2457 tg3_phydsp_write(tp, 0x401f, 0x14e2);
2458 tg3_phy_toggle_auxctl_smdsp(tp, false);
2460 } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2461 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2462 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2463 if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2464 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2465 tg3_writephy(tp, MII_TG3_TEST1,
2466 MII_TG3_TEST1_TRIM_EN | 0x4);
2468 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2470 tg3_phy_toggle_auxctl_smdsp(tp, false);
2474 /* Set Extended packet length bit (bit 14) on all chips that */
2475 /* support jumbo frames */
2476 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2477 /* Cannot do read-modify-write on 5401 */
2478 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2479 } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2480 /* Set bit 14 with read-modify-write to preserve other bits */
2481 err = tg3_phy_auxctl_read(tp,
2482 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2484 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2485 val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2488 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2489 * jumbo frames transmission.
2491 if (tg3_flag(tp, JUMBO_CAPABLE)) {
2492 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2493 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2494 val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2497 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2498 /* adjust output voltage */
2499 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2502 tg3_phy_toggle_automdix(tp, 1);
2503 tg3_phy_set_wirespeed(tp);
2507 #define TG3_GPIO_MSG_DRVR_PRES 0x00000001
2508 #define TG3_GPIO_MSG_NEED_VAUX 0x00000002
2509 #define TG3_GPIO_MSG_MASK (TG3_GPIO_MSG_DRVR_PRES | \
2510 TG3_GPIO_MSG_NEED_VAUX)
2511 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2512 ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2513 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2514 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2515 (TG3_GPIO_MSG_DRVR_PRES << 12))
2517 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2518 ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2519 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2520 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2521 (TG3_GPIO_MSG_NEED_VAUX << 12))
2523 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2527 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2528 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2529 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2531 status = tr32(TG3_CPMU_DRV_STATUS);
2533 shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2534 status &= ~(TG3_GPIO_MSG_MASK << shift);
2535 status |= (newstat << shift);
2537 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2538 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2539 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2541 tw32(TG3_CPMU_DRV_STATUS, status);
2543 return status >> TG3_APE_GPIO_MSG_SHIFT;
2546 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2548 if (!tg3_flag(tp, IS_NIC))
2551 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2552 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2553 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2554 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2557 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2559 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2560 TG3_GRC_LCLCTL_PWRSW_DELAY);
2562 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2564 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2565 TG3_GRC_LCLCTL_PWRSW_DELAY);
/* Keep running from Vmain while shutting down: pulse GPIO1 (output
 * enabled via OE1) through a three-write sequence on GRC_LOCAL_CTRL,
 * with the power-switch settle delay after each write.  Skipped for
 * LOM designs and for 5700/5701, whose GPIOs are wired differently.
 * NOTE(review): line 2587 (the value written by the middle
 * tw32_wait_f) is missing from this listing — presumably
 * grc_local_ctrl without OUTPUT1; confirm against the full source.
 */
2571 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2575 if (!tg3_flag(tp, IS_NIC) ||
2576 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2577 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
2580 grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2582 tw32_wait_f(GRC_LOCAL_CTRL,
2583 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2584 TG3_GRC_LCLCTL_PWRSW_DELAY);
2586 tw32_wait_f(GRC_LOCAL_CTRL,
2588 TG3_GRC_LCLCTL_PWRSW_DELAY);
2590 tw32_wait_f(GRC_LOCAL_CTRL,
2591 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2592 TG3_GRC_LCLCTL_PWRSW_DELAY);
/* Switch the NIC power source to auxiliary power (Vaux), used when the
 * device must stay minimally powered (WoL/ASF/APE).  Three hardware
 * families need three different GPIO recipes:
 *  - 5700/5701: single combined GRC_LOCAL_CTRL write;
 *  - 5761 (non-e): GPIO0 and GPIO2 are swapped, so a staged
 *    three-write sequence is used;
 *  - everything else: staged writes, honoring the 5714 current-draw
 *    workaround and the 5753 "no GPIO2" restriction.
 * Each write is followed by the power-switch settle delay.
 * NOTE(review): some original lines (e.g. closing braces, the value
 * on line 2617, the else on 2629) are absent from this listing.
 */
2595 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2597 if (!tg3_flag(tp, IS_NIC))
2600 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2601 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2602 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2603 (GRC_LCLCTRL_GPIO_OE0 |
2604 GRC_LCLCTRL_GPIO_OE1 |
2605 GRC_LCLCTRL_GPIO_OE2 |
2606 GRC_LCLCTRL_GPIO_OUTPUT0 |
2607 GRC_LCLCTRL_GPIO_OUTPUT1),
2608 TG3_GRC_LCLCTL_PWRSW_DELAY);
2609 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2610 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2611 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2612 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2613 GRC_LCLCTRL_GPIO_OE1 |
2614 GRC_LCLCTRL_GPIO_OE2 |
2615 GRC_LCLCTRL_GPIO_OUTPUT0 |
2616 GRC_LCLCTRL_GPIO_OUTPUT1 |
2618 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2619 TG3_GRC_LCLCTL_PWRSW_DELAY);
2621 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2622 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2623 TG3_GRC_LCLCTL_PWRSW_DELAY);
2625 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2626 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2627 TG3_GRC_LCLCTL_PWRSW_DELAY);
2630 u32 grc_local_ctrl = 0;
2632 /* Workaround to prevent overdrawing Amps. */
2633 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2634 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2635 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2637 TG3_GRC_LCLCTL_PWRSW_DELAY);
2640 /* On 5753 and variants, GPIO2 cannot be used. */
2641 no_gpio2 = tp->nic_sram_data_cfg &
2642 NIC_SRAM_DATA_CFG_NO_GPIO2;
2644 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2645 GRC_LCLCTRL_GPIO_OE1 |
2646 GRC_LCLCTRL_GPIO_OE2 |
2647 GRC_LCLCTRL_GPIO_OUTPUT1 |
2648 GRC_LCLCTRL_GPIO_OUTPUT2;
2650 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2651 GRC_LCLCTRL_GPIO_OUTPUT2);
2653 tw32_wait_f(GRC_LOCAL_CTRL,
2654 tp->grc_local_ctrl | grc_local_ctrl,
2655 TG3_GRC_LCLCTL_PWRSW_DELAY);
2657 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2659 tw32_wait_f(GRC_LOCAL_CTRL,
2660 tp->grc_local_ctrl | grc_local_ctrl,
2661 TG3_GRC_LCLCTL_PWRSW_DELAY);
2664 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2665 tw32_wait_f(GRC_LOCAL_CTRL,
2666 tp->grc_local_ctrl | grc_local_ctrl,
2667 TG3_GRC_LCLCTL_PWRSW_DELAY);
/* 5717/5719/5720 variant of aux-power arbitration.  Under the APE
 * GPIO lock, this function publishes whether it needs Vaux (ASF, APE
 * or WoL active) via the shared status word, then switches the power
 * source based on the merged answer from all PCI functions: Vaux if
 * anyone needs it, otherwise Vmain.
 * NOTE(review): the early-return path when another function's driver
 * is present (line 2685) has its statement omitted from this listing.
 */
2672 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2676 /* Serialize power state transitions */
2677 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2680 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2681 msg = TG3_GPIO_MSG_NEED_VAUX;
/* msg now holds the combined state of every function. */
2683 msg = tg3_set_function_status(tp, msg);
2685 if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2688 if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2689 tg3_pwrsrc_switch_to_vaux(tp);
2691 tg3_pwrsrc_die_with_vmain(tp);
2694 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
/* Decide whether the NIC (and, on dual-port 5704-class boards, its
 * peer function) needs auxiliary power, and switch the power source
 * accordingly.  @include_wol selects whether WoL enablement counts as
 * a reason to need Vaux.  57765 GPIOs mean something else entirely,
 * so it is excluded; 5717/5719/5720 delegate to the multi-function
 * protocol in tg3_frob_aux_power_5717().
 * NOTE(review): the need_vaux assignments and final if/else are
 * partially omitted from this listing.
 */
2697 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2699 bool need_vaux = false;
2701 /* The GPIOs do something completely different on 57765. */
2702 if (!tg3_flag(tp, IS_NIC) ||
2703 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
2706 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2707 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2708 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2709 tg3_frob_aux_power_5717(tp, include_wol ?
2710 tg3_flag(tp, WOL_ENABLE) != 0 : 0);
/* Dual-port boards: consult the peer function's WoL/ASF state too. */
2714 if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2715 struct net_device *dev_peer;
2717 dev_peer = pci_get_drvdata(tp->pdev_peer);
2719 /* remove_one() may have been run on the peer. */
2721 struct tg3 *tp_peer = netdev_priv(dev_peer);
2723 if (tg3_flag(tp_peer, INIT_COMPLETE))
2726 if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2727 tg3_flag(tp_peer, ENABLE_ASF))
2732 if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2733 tg3_flag(tp, ENABLE_ASF))
2737 tg3_pwrsrc_switch_to_vaux(tp);
2739 tg3_pwrsrc_die_with_vmain(tp);
2742 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2744 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2746 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2747 if (speed != SPEED_10)
2749 } else if (speed == SPEED_10)
2755 static int tg3_setup_phy(struct tg3 *, int);
2756 static int tg3_halt_cpu(struct tg3 *, u32);
/* Return true when the PHY must NOT be powered down on this chip
 * (tg3_power_down_phy() consults this before writing BMCR_PDOWN).
 * The decision keys off the ASIC revision plus the SERDES phy flags.
 * NOTE(review): the switch's case labels and return statements are
 * omitted from this listing — see the full source for the per-chip
 * conditions.
 */
2758 static bool tg3_phy_power_bug(struct tg3 *tp)
2760 switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
2765 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
2774 if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
/* Power down the PHY (or put it in its lowest safe state) prior to a
 * device power-down.  Handling differs per PHY type:
 *  - SERDES: quiesce SG_DIG autoneg on 5704, or (5906) assert EPHY
 *    IDDQ via GRC_MISC_CFG;
 *  - FET (internal 10/100 PHY): restart aneg with an empty
 *    advertisement, then set the standby-power-down bit through the
 *    FET shadow registers;
 *  - otherwise, when @do_low_power: force LEDs off and isolate the
 *    PHY through the AUXCTL power-control shadow.
 * Finally, unless tg3_phy_power_bug() forbids it, adjust the
 * 1000Mb MAC clock on 5784_AX/5761_AX and write BMCR_PDOWN.
 * NOTE(review): some lines (declarations, else/brace lines) are
 * omitted from this listing.
 */
2783 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2787 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2788 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2789 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2790 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2793 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2794 tw32(SG_DIG_CTRL, sg_dig_ctrl);
2795 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2800 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2802 val = tr32(GRC_MISC_CFG);
2803 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2806 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2808 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
/* Advertise nothing and restart aneg to drop the link. */
2811 tg3_writephy(tp, MII_ADVERTISE, 0);
2812 tg3_writephy(tp, MII_BMCR,
2813 BMCR_ANENABLE | BMCR_ANRESTART);
2815 tg3_writephy(tp, MII_TG3_FET_TEST,
2816 phytest | MII_TG3_FET_SHADOW_EN);
2817 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2818 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2820 MII_TG3_FET_SHDW_AUXMODE4,
2823 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2826 } else if (do_low_power) {
2827 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2828 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2830 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2831 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2832 MII_TG3_AUXCTL_PCTL_VREG_11V;
2833 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
2836 /* The PHY should not be powered down on some chips because
2839 if (tg3_phy_power_bug(tp))
/* Slow the 1000Mb MAC clock before the PHY goes away (AX revs). */
2842 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2843 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2844 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2845 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2846 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2847 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2850 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2853 /* tp->lock is held. */
/* Acquire the NVRAM software-arbitration semaphore (SWARB REQ1) for
 * this host, polling up to 8000 times for the hardware grant bit.
 * Recursive holds are counted in tp->nvram_lock_cnt so that the
 * matching tg3_nvram_unlock() calls nest correctly.  On timeout the
 * request is cancelled — presumably returning an error (the return
 * statements are omitted from this listing; confirm in full source).
 */
2854 static int tg3_nvram_lock(struct tg3 *tp)
2856 if (tg3_flag(tp, NVRAM)) {
2859 if (tp->nvram_lock_cnt == 0) {
2860 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2861 for (i = 0; i < 8000; i++) {
2862 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2867 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2871 tp->nvram_lock_cnt++;
2876 /* tp->lock is held. */
2877 static void tg3_nvram_unlock(struct tg3 *tp)
2879 if (tg3_flag(tp, NVRAM)) {
2880 if (tp->nvram_lock_cnt > 0)
2881 tp->nvram_lock_cnt--;
2882 if (tp->nvram_lock_cnt == 0)
2883 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2887 /* tp->lock is held. */
2888 static void tg3_enable_nvram_access(struct tg3 *tp)
2890 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2891 u32 nvaccess = tr32(NVRAM_ACCESS);
2893 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2897 /* tp->lock is held. */
2898 static void tg3_disable_nvram_access(struct tg3 *tp)
2900 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2901 u32 nvaccess = tr32(NVRAM_ACCESS);
2903 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
/* Read one 32-bit word from a legacy serial EEPROM (devices without
 * the NVRAM controller).  The target byte offset must be word-aligned
 * and within EEPROM_ADDR_ADDR_MASK.  A read command is programmed
 * into GRC_EEPROM_ADDR, completion is polled up to 1000 times, and
 * the result is fetched from GRC_EEPROM_DATA.
 * NOTE(review): the error returns, udelay and the final byteswap
 * statement are omitted from this listing.
 */
2907 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
2908 u32 offset, u32 *val)
2913 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
2916 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
2917 EEPROM_ADDR_DEVID_MASK |
2919 tw32(GRC_EEPROM_ADDR,
2921 (0 << EEPROM_ADDR_DEVID_SHIFT) |
2922 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
2923 EEPROM_ADDR_ADDR_MASK) |
2924 EEPROM_ADDR_READ | EEPROM_ADDR_START);
2926 for (i = 0; i < 1000; i++) {
2927 tmp = tr32(GRC_EEPROM_ADDR);
2929 if (tmp & EEPROM_ADDR_COMPLETE)
2933 if (!(tmp & EEPROM_ADDR_COMPLETE))
2936 tmp = tr32(GRC_EEPROM_DATA);
2939 * The data will always be opposite the native endian
2940 * format. Perform a blind byteswap to compensate.
2947 #define NVRAM_CMD_TIMEOUT 10000
2949 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
2953 tw32(NVRAM_CMD, nvram_cmd);
2954 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
2956 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
2962 if (i == NVRAM_CMD_TIMEOUT)
2968 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
2970 if (tg3_flag(tp, NVRAM) &&
2971 tg3_flag(tp, NVRAM_BUFFERED) &&
2972 tg3_flag(tp, FLASH) &&
2973 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2974 (tp->nvram_jedecnum == JEDEC_ATMEL))
2976 addr = ((addr / tp->nvram_pagesize) <<
2977 ATMEL_AT45DB0X1B_PAGE_POS) +
2978 (addr % tp->nvram_pagesize);
2983 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
2985 if (tg3_flag(tp, NVRAM) &&
2986 tg3_flag(tp, NVRAM_BUFFERED) &&
2987 tg3_flag(tp, FLASH) &&
2988 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2989 (tp->nvram_jedecnum == JEDEC_ATMEL))
2991 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
2992 tp->nvram_pagesize) +
2993 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
2998 /* NOTE: Data read in from NVRAM is byteswapped according to
2999 * the byteswapping settings for all other register accesses.
3000 * tg3 devices are BE devices, so on a BE machine, the data
3001 * returned will be exactly as it is seen in NVRAM. On a LE
3002 * machine, the 32-bit value will be byteswapped.
/* Read one 32-bit word from NVRAM at logical @offset into *@val.
 * Falls back to the legacy EEPROM path on devices without the NVRAM
 * controller; otherwise translates the offset, takes the NVRAM
 * arbitration lock, enables host access, executes a single-word read
 * command, then tears everything back down in reverse order.
 * NOTE(review): error-return lines after the bounds check and lock
 * acquisition are omitted from this listing.
 */
3004 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3008 if (!tg3_flag(tp, NVRAM))
3009 return tg3_nvram_read_using_eeprom(tp, offset, val);
3011 offset = tg3_nvram_phys_addr(tp, offset);
3013 if (offset > NVRAM_ADDR_MSK)
3016 ret = tg3_nvram_lock(tp);
3020 tg3_enable_nvram_access(tp);
3022 tw32(NVRAM_ADDR, offset);
3023 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3024 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3027 *val = tr32(NVRAM_RDDATA);
3029 tg3_disable_nvram_access(tp);
3031 tg3_nvram_unlock(tp);
3036 /* Ensures NVRAM data is in bytestream format. */
3037 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3040 int res = tg3_nvram_read(tp, offset, &v);
3042 *val = cpu_to_be32(v);
/* On-chip scratch RAM windows used when loading RX/TX CPU firmware. */
3046 #define RX_CPU_SCRATCH_BASE 0x30000
3047 #define RX_CPU_SCRATCH_SIZE 0x04000
3048 #define TX_CPU_SCRATCH_BASE 0x34000
3049 #define TX_CPU_SCRATCH_SIZE 0x04000
3051 /* tp->lock is held. */
/* Halt the embedded RX or TX CPU selected by @offset (RX_CPU_BASE /
 * TX_CPU_BASE).  5705+ parts have no TX CPU, hence the BUG_ON.  The
 * 5906's virtual CPU is halted through GRC_VCPU_EXT_CTRL instead of
 * the CPU_MODE register.  Both branches poll up to 10000 times for
 * the halt to take effect; a timeout is logged via netdev_err.  On
 * success, any firmware NVRAM arbitration request is cleared so the
 * host can take the lock.
 * NOTE(review): returns and some brace/else lines are omitted from
 * this listing.
 */
3052 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
3056 BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3058 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3059 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3061 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3064 if (offset == RX_CPU_BASE) {
3065 for (i = 0; i < 10000; i++) {
3066 tw32(offset + CPU_STATE, 0xffffffff);
3067 tw32(offset + CPU_MODE, CPU_MODE_HALT);
3068 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3072 tw32(offset + CPU_STATE, 0xffffffff);
3073 tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
3076 for (i = 0; i < 10000; i++) {
3077 tw32(offset + CPU_STATE, 0xffffffff);
3078 tw32(offset + CPU_MODE, CPU_MODE_HALT);
3079 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3085 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3086 __func__, offset == RX_CPU_BASE ? "RX" : "TX");
3090 /* Clear firmware's nvram arbitration. */
3091 if (tg3_flag(tp, NVRAM))
3092 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3097 unsigned int fw_base;
3098 unsigned int fw_len;
3099 const __be32 *fw_data;
3102 /* tp->lock is held. */
/* Load a firmware image (@info) into the scratch RAM of the CPU at
 * @cpu_base.  Refuses to target the TX CPU on 5705+ parts (it does
 * not exist there).  The write primitive is chosen per chip family
 * (direct memory write vs. indirect register write).  The NVRAM lock
 * is taken before halting the CPU because bootcode may still be
 * reading NVRAM.  The scratch area is zeroed, the CPU held in HALT,
 * and the big-endian firmware words copied in relative to the
 * image's load base.
 * NOTE(review): error-return lines and the netdev_err first argument
 * are omitted from this listing.
 */
3103 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3104 u32 cpu_scratch_base, int cpu_scratch_size,
3105 struct fw_info *info)
3107 int err, lock_err, i;
3108 void (*write_op)(struct tg3 *, u32, u32);
3110 if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3112 "%s: Trying to load TX cpu firmware which is 5705\n",
3117 if (tg3_flag(tp, 5705_PLUS))
3118 write_op = tg3_write_mem;
3120 write_op = tg3_write_indirect_reg32;
3122 /* It is possible that bootcode is still loading at this point.
3123 * Get the nvram lock first before halting the cpu.
3125 lock_err = tg3_nvram_lock(tp);
3126 err = tg3_halt_cpu(tp, cpu_base);
3128 tg3_nvram_unlock(tp);
/* Zero the scratch window, keep the CPU halted, then copy the image. */
3132 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3133 write_op(tp, cpu_scratch_base + i, 0);
3134 tw32(cpu_base + CPU_STATE, 0xffffffff);
3135 tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
3136 for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
3137 write_op(tp, (cpu_scratch_base +
3138 (info->fw_base & 0xffff) +
3140 be32_to_cpu(info->fw_data[i]));
3148 /* tp->lock is held. */
/* Load the 5701 A0 workaround firmware into both the RX and TX CPU
 * scratch areas, then start only the RX CPU: its program counter is
 * set to the image base and verified (up to 5 attempts, re-halting
 * and rewriting PC between tries) before releasing the CPU from HALT.
 * The blob layout (version, base, length header followed by the
 * payload at fw_data[3]) is described in the inline comment below.
 * NOTE(review): error-return lines are omitted from this listing.
 */
3149 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3151 struct fw_info info;
3152 const __be32 *fw_data;
3155 fw_data = (void *)tp->fw->data;
3157 /* Firmware blob starts with version numbers, followed by
3158 start address and length. We are setting complete length.
3159 length = end_address_of_bss - start_address_of_text.
3160 Remainder is the blob to be loaded contiguously
3161 from start address. */
3163 info.fw_base = be32_to_cpu(fw_data[1]);
3164 info.fw_len = tp->fw->size - 12;
3165 info.fw_data = &fw_data[3];
3167 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3168 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3173 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3174 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3179 /* Now startup only the RX cpu. */
3180 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3181 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3183 for (i = 0; i < 5; i++) {
3184 if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
3186 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3187 tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
3188 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3192 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
3193 "should be %08x\n", __func__,
3194 tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
/* PC verified: release the RX CPU from HALT. */
3197 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3198 tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
3203 /* tp->lock is held. */
/* Load the software-TSO firmware.  A no-op on chips with hardware TSO
 * (HW_TSO_1/2/3).  The 5705 runs TSO firmware on the RX CPU using the
 * SRAM mbuf pool as scratch space; all other applicable chips use the
 * TX CPU and its dedicated scratch window.  After loading, the CPU's
 * program counter is set to the image base and verified (up to 5
 * attempts) before the CPU is released from HALT.
 * NOTE(review): error-return lines and the netdev_err first argument
 * are omitted from this listing.
 */
3204 static int tg3_load_tso_firmware(struct tg3 *tp)
3206 struct fw_info info;
3207 const __be32 *fw_data;
3208 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3211 if (tg3_flag(tp, HW_TSO_1) ||
3212 tg3_flag(tp, HW_TSO_2) ||
3213 tg3_flag(tp, HW_TSO_3))
3216 fw_data = (void *)tp->fw->data;
3218 /* Firmware blob starts with version numbers, followed by
3219 start address and length. We are setting complete length.
3220 length = end_address_of_bss - start_address_of_text.
3221 Remainder is the blob to be loaded contiguously
3222 from start address. */
3224 info.fw_base = be32_to_cpu(fw_data[1]);
3225 cpu_scratch_size = tp->fw_len;
3226 info.fw_len = tp->fw->size - 12;
3227 info.fw_data = &fw_data[3];
3229 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
3230 cpu_base = RX_CPU_BASE;
3231 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3233 cpu_base = TX_CPU_BASE;
3234 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3235 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3238 err = tg3_load_firmware_cpu(tp, cpu_base,
3239 cpu_scratch_base, cpu_scratch_size,
3244 /* Now startup the cpu. */
3245 tw32(cpu_base + CPU_STATE, 0xffffffff);
3246 tw32_f(cpu_base + CPU_PC, info.fw_base);
3248 for (i = 0; i < 5; i++) {
3249 if (tr32(cpu_base + CPU_PC) == info.fw_base)
3251 tw32(cpu_base + CPU_STATE, 0xffffffff);
3252 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3253 tw32_f(cpu_base + CPU_PC, info.fw_base);
3258 "%s fails to set CPU PC, is %08x should be %08x\n",
3259 __func__, tr32(cpu_base + CPU_PC), info.fw_base);
/* PC verified: release the CPU from HALT. */
3262 tw32(cpu_base + CPU_STATE, 0xffffffff);
3263 tw32_f(cpu_base + CPU_MODE, 0x00000000);
3268 /* tp->lock is held. */
/* Program the device's unicast MAC address.  The six bytes of
 * dev_addr are packed into a 16-bit high word and a 32-bit low word
 * and written to all four MAC_ADDR slots (slot 1 optionally skipped
 * via @skip_mac_1, e.g. when ASF firmware owns it).  5703/5704 also
 * mirror the address into the twelve extended-address slots.
 * Finally the byte-sum of the address seeds the TX backoff engine
 * so that stations with different MACs pick different backoffs.
 */
3269 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
3271 u32 addr_high, addr_low;
3274 addr_high = ((tp->dev->dev_addr[0] << 8) |
3275 tp->dev->dev_addr[1]);
3276 addr_low = ((tp->dev->dev_addr[2] << 24) |
3277 (tp->dev->dev_addr[3] << 16) |
3278 (tp->dev->dev_addr[4] << 8) |
3279 (tp->dev->dev_addr[5] << 0));
3280 for (i = 0; i < 4; i++) {
3281 if (i == 1 && skip_mac_1)
3283 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
3284 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
3287 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3288 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
3289 for (i = 0; i < 12; i++) {
3290 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
3291 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
/* Seed TX backoff from the MAC address byte-sum (addr_high reused). */
3295 addr_high = (tp->dev->dev_addr[0] +
3296 tp->dev->dev_addr[1] +
3297 tp->dev->dev_addr[2] +
3298 tp->dev->dev_addr[3] +
3299 tp->dev->dev_addr[4] +
3300 tp->dev->dev_addr[5]) &
3301 TX_BACKOFF_SEED_MASK;
3302 tw32(MAC_TX_BACKOFF_SEED, addr_high);
3305 static void tg3_enable_register_access(struct tg3 *tp)
3308 * Make sure register accesses (indirect or otherwise) will function
3311 pci_write_config_dword(tp->pdev,
3312 TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
3315 static int tg3_power_up(struct tg3 *tp)
3319 tg3_enable_register_access(tp);
3321 err = pci_set_power_state(tp->pdev, PCI_D0);
3323 /* Switch out of Vaux if it is a NIC */
3324 tg3_pwrsrc_switch_to_vmain(tp);
3326 netdev_err(tp->dev, "Transition to D0 failed\n");
/* Prepare the chip for a power-down / suspend transition:
 *  1. re-enable register access and restore the PCIe CLKREQ bit on
 *     chips with the CLKREQ bug;
 *  2. mask PCI interrupts via MISC_HOST_CTRL;
 *  3. drop the PHY to its low-power link configuration, either
 *     through phylib (USE_PHYLIB) or the legacy setup path, saving
 *     the original link parameters in tp->link_config.orig_*;
 *  4. hand WoL state to firmware (VCPU WOL disable on 5906, ASF
 *     mailbox handshake otherwise, WOL_MBOX signature when capable);
 *  5. if the device should wake the system, program MAC_MODE /
 *     MAC_RX_MODE for magic-packet reception at the WoL link speed;
 *  6. gate the RX/TX/core clocks as allowed per chip family;
 *  7. power down the PHY when nothing needs it, arbitrate aux
 *     power, apply the 5750 A/B PLL workaround, halt the RX CPU
 *     when ASF is off, and post the shutdown signature.
 * Order is hardware-mandated; do not reorder these steps.
 * NOTE(review): many original lines (declarations, else/brace lines,
 * udelay calls, returns) are omitted from this listing; comments
 * describe only the visible flow.
 */
3332 static int tg3_power_down_prepare(struct tg3 *tp)
3335 bool device_should_wake, do_low_power;
3337 tg3_enable_register_access(tp);
3339 /* Restore the CLKREQ setting. */
3340 if (tg3_flag(tp, CLKREQ_BUG)) {
3343 pci_read_config_word(tp->pdev,
3344 pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
3346 lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
3347 pci_write_config_word(tp->pdev,
3348 pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
3352 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
3353 tw32(TG3PCI_MISC_HOST_CTRL,
3354 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
3356 device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
3357 tg3_flag(tp, WOL_ENABLE);
3359 if (tg3_flag(tp, USE_PHYLIB)) {
3360 do_low_power = false;
3361 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
3362 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3363 struct phy_device *phydev;
3364 u32 phyid, advertising;
3366 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
3368 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
/* Save current link parameters for restore on power-up. */
3370 tp->link_config.orig_speed = phydev->speed;
3371 tp->link_config.orig_duplex = phydev->duplex;
3372 tp->link_config.orig_autoneg = phydev->autoneg;
3373 tp->link_config.orig_advertising = phydev->advertising;
3375 advertising = ADVERTISED_TP |
3377 ADVERTISED_Autoneg |
3378 ADVERTISED_10baseT_Half;
3380 if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
3381 if (tg3_flag(tp, WOL_SPEED_100MB))
3383 ADVERTISED_100baseT_Half |
3384 ADVERTISED_100baseT_Full |
3385 ADVERTISED_10baseT_Full;
3387 advertising |= ADVERTISED_10baseT_Full;
3390 phydev->advertising = advertising;
3392 phy_start_aneg(phydev);
/* Broadcom OUI PHYs (other than AC131) need the low-power path. */
3394 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
3395 if (phyid != PHY_ID_BCMAC131) {
3396 phyid &= PHY_BCM_OUI_MASK;
3397 if (phyid == PHY_BCM_OUI_1 ||
3398 phyid == PHY_BCM_OUI_2 ||
3399 phyid == PHY_BCM_OUI_3)
3400 do_low_power = true;
3404 do_low_power = true;
3406 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3407 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3408 tp->link_config.orig_speed = tp->link_config.speed;
3409 tp->link_config.orig_duplex = tp->link_config.duplex;
3410 tp->link_config.orig_autoneg = tp->link_config.autoneg;
3413 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
3414 tp->link_config.speed = SPEED_10;
3415 tp->link_config.duplex = DUPLEX_HALF;
3416 tp->link_config.autoneg = AUTONEG_ENABLE;
3417 tg3_setup_phy(tp, 0);
3421 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3424 val = tr32(GRC_VCPU_EXT_CTRL);
3425 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
3426 } else if (!tg3_flag(tp, ENABLE_ASF)) {
/* Wait (up to 200 polls) for bootcode to post its magic. */
3430 for (i = 0; i < 200; i++) {
3431 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
3432 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
3437 if (tg3_flag(tp, WOL_CAP))
3438 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
3439 WOL_DRV_STATE_SHUTDOWN |
3443 if (device_should_wake) {
3446 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
3448 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
3449 tg3_phy_auxctl_write(tp,
3450 MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
3451 MII_TG3_AUXCTL_PCTL_WOL_EN |
3452 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3453 MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
3457 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3458 mac_mode = MAC_MODE_PORT_MODE_GMII;
3460 mac_mode = MAC_MODE_PORT_MODE_MII;
3462 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
3463 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
3465 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
3466 SPEED_100 : SPEED_10;
3467 if (tg3_5700_link_polarity(tp, speed))
3468 mac_mode |= MAC_MODE_LINK_POLARITY;
3470 mac_mode &= ~MAC_MODE_LINK_POLARITY;
3473 mac_mode = MAC_MODE_PORT_MODE_TBI;
3476 if (!tg3_flag(tp, 5750_PLUS))
3477 tw32(MAC_LED_CTRL, tp->led_ctrl);
3479 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
3480 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
3481 (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
3482 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
3484 if (tg3_flag(tp, ENABLE_APE))
3485 mac_mode |= MAC_MODE_APE_TX_EN |
3486 MAC_MODE_APE_RX_EN |
3487 MAC_MODE_TDE_ENABLE;
3489 tw32_f(MAC_MODE, mac_mode);
3492 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
/* Clock gating: pick the most aggressive gating the chip allows. */
3496 if (!tg3_flag(tp, WOL_SPEED_100MB) &&
3497 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3498 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
3501 base_val = tp->pci_clock_ctrl;
3502 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
3503 CLOCK_CTRL_TXCLK_DISABLE);
3505 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
3506 CLOCK_CTRL_PWRDOWN_PLL133, 40);
3507 } else if (tg3_flag(tp, 5780_CLASS) ||
3508 tg3_flag(tp, CPMU_PRESENT) ||
3509 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3511 } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
3512 u32 newbits1, newbits2;
3514 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3515 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3516 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
3517 CLOCK_CTRL_TXCLK_DISABLE |
3519 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3520 } else if (tg3_flag(tp, 5705_PLUS)) {
3521 newbits1 = CLOCK_CTRL_625_CORE;
3522 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
3524 newbits1 = CLOCK_CTRL_ALTCLK;
3525 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3528 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
3531 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
3534 if (!tg3_flag(tp, 5705_PLUS)) {
3537 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3538 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3539 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
3540 CLOCK_CTRL_TXCLK_DISABLE |
3541 CLOCK_CTRL_44MHZ_CORE);
3543 newbits3 = CLOCK_CTRL_44MHZ_CORE;
3546 tw32_wait_f(TG3PCI_CLOCK_CTRL,
3547 tp->pci_clock_ctrl | newbits3, 40);
3551 if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
3552 tg3_power_down_phy(tp, do_low_power);
3554 tg3_frob_aux_power(tp, true);
3556 /* Workaround for unstable PLL clock */
3557 if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
3558 (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
3559 u32 val = tr32(0x7d00);
3561 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
3563 if (!tg3_flag(tp, ENABLE_ASF)) {
3566 err = tg3_nvram_lock(tp);
3567 tg3_halt_cpu(tp, RX_CPU_BASE);
3569 tg3_nvram_unlock(tp);
3573 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
3578 static void tg3_power_down(struct tg3 *tp)
3580 tg3_power_down_prepare(tp);
3582 pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
3583 pci_set_power_state(tp->pdev, PCI_D3hot);
/* Decode the PHY's MII_TG3_AUX_STAT speed/duplex field (@val) into
 * *@speed and *@duplex.  Unrecognized codes fall through to the
 * default: FET PHYs derive speed/duplex from the AUX_STAT 100/FULL
 * bits; all others report SPEED_INVALID / DUPLEX_INVALID.
 * NOTE(review): the SPEED_10/SPEED_100 assignments and break
 * statements are omitted from this listing.
 */
3586 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
3588 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
3589 case MII_TG3_AUX_STAT_10HALF:
3591 *duplex = DUPLEX_HALF;
3594 case MII_TG3_AUX_STAT_10FULL:
3596 *duplex = DUPLEX_FULL;
3599 case MII_TG3_AUX_STAT_100HALF:
3601 *duplex = DUPLEX_HALF;
3604 case MII_TG3_AUX_STAT_100FULL:
3606 *duplex = DUPLEX_FULL;
3609 case MII_TG3_AUX_STAT_1000HALF:
3610 *speed = SPEED_1000;
3611 *duplex = DUPLEX_HALF;
3614 case MII_TG3_AUX_STAT_1000FULL:
3615 *speed = SPEED_1000;
3616 *duplex = DUPLEX_FULL;
3620 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3621 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
3623 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
3627 *speed = SPEED_INVALID;
3628 *duplex = DUPLEX_INVALID;
/* Program the PHY autonegotiation advertisement registers from the
 * ethtool-style @advertise mask and @flowctrl pause flags:
 *  - MII_ADVERTISE gets the 10/100 bits plus pause encoding;
 *  - MII_CTRL1000 gets the gigabit bits (skipped on 10/100-only
 *    PHYs), forcing master mode on 5701 A0/B0 as a chip workaround;
 *  - when the PHY is EEE-capable, LPI is disabled in the CPMU and
 *    the EEE advertisement is written through clause-45 MDIO, with
 *    per-ASIC DSP fixups applied under the AUXCTL smdsp toggle.
 * NOTE(review): error-return lines, some case labels and closing
 * braces are omitted from this listing.
 */
3633 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
3638 new_adv = ADVERTISE_CSMA;
3639 if (advertise & ADVERTISED_10baseT_Half)
3640 new_adv |= ADVERTISE_10HALF;
3641 if (advertise & ADVERTISED_10baseT_Full)
3642 new_adv |= ADVERTISE_10FULL;
3643 if (advertise & ADVERTISED_100baseT_Half)
3644 new_adv |= ADVERTISE_100HALF;
3645 if (advertise & ADVERTISED_100baseT_Full)
3646 new_adv |= ADVERTISE_100FULL;
3648 new_adv |= tg3_advert_flowctrl_1000T(flowctrl);
3650 err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
3654 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
3658 if (advertise & ADVERTISED_1000baseT_Half)
3659 new_adv |= ADVERTISE_1000HALF;
3660 if (advertise & ADVERTISED_1000baseT_Full)
3661 new_adv |= ADVERTISE_1000FULL;
3663 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3664 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
3665 new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
3667 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
3671 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
3674 tw32(TG3_CPMU_EEE_MODE,
3675 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
3677 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
3682 /* Advertise 100-BaseTX EEE ability */
3683 if (advertise & ADVERTISED_100baseT_Full)
3684 val |= MDIO_AN_EEE_ADV_100TX;
3685 /* Advertise 1000-BaseT EEE ability */
3686 if (advertise & ADVERTISED_1000baseT_Full)
3687 val |= MDIO_AN_EEE_ADV_1000T;
3688 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
3692 switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
3694 case ASIC_REV_57765:
3696 /* If we advertised any eee advertisements above... */
3698 val = MII_TG3_DSP_TAP26_ALNOKO |
3699 MII_TG3_DSP_TAP26_RMRXSTO |
3700 MII_TG3_DSP_TAP26_OPCSINPT;
3701 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
3704 if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
3705 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
3706 MII_TG3_DSP_CH34TP2_HIBW01);
3709 err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
/* Kick off copper-PHY link bring-up.  Three advertisement cases:
 *  - low-power mode: advertise only 10Mb (plus 100Mb when WoL needs
 *    it), with both pause directions;
 *  - no forced speed configured: advertise tp->link_config as-is,
 *    stripping gigabit modes on 10/100-only PHYs;
 *  - a specific speed/duplex requested: advertise exactly that mode.
 * When autoneg is disabled with a forced speed, BMCR is programmed
 * directly; if it differs from the current BMCR the PHY is first put
 * in loopback and polled (up to 1500 iterations) for link-down
 * before the final BMCR write.  Otherwise autoneg is (re)started.
 * NOTE(review): declarations, case labels for SPEED_10/100/1000 and
 * several brace/else lines are omitted from this listing.
 */
3718 static void tg3_phy_copper_begin(struct tg3 *tp)
3723 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
3724 new_adv = ADVERTISED_10baseT_Half |
3725 ADVERTISED_10baseT_Full;
3726 if (tg3_flag(tp, WOL_SPEED_100MB))
3727 new_adv |= ADVERTISED_100baseT_Half |
3728 ADVERTISED_100baseT_Full;
3730 tg3_phy_autoneg_cfg(tp, new_adv,
3731 FLOW_CTRL_TX | FLOW_CTRL_RX);
3732 } else if (tp->link_config.speed == SPEED_INVALID) {
3733 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
3734 tp->link_config.advertising &=
3735 ~(ADVERTISED_1000baseT_Half |
3736 ADVERTISED_1000baseT_Full);
3738 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
3739 tp->link_config.flowctrl);
3741 /* Asking for a specific link mode. */
3742 if (tp->link_config.speed == SPEED_1000) {
3743 if (tp->link_config.duplex == DUPLEX_FULL)
3744 new_adv = ADVERTISED_1000baseT_Full;
3746 new_adv = ADVERTISED_1000baseT_Half;
3747 } else if (tp->link_config.speed == SPEED_100) {
3748 if (tp->link_config.duplex == DUPLEX_FULL)
3749 new_adv = ADVERTISED_100baseT_Full;
3751 new_adv = ADVERTISED_100baseT_Half;
3753 if (tp->link_config.duplex == DUPLEX_FULL)
3754 new_adv = ADVERTISED_10baseT_Full;
3756 new_adv = ADVERTISED_10baseT_Half;
3759 tg3_phy_autoneg_cfg(tp, new_adv,
3760 tp->link_config.flowctrl);
3763 if (tp->link_config.autoneg == AUTONEG_DISABLE &&
3764 tp->link_config.speed != SPEED_INVALID) {
3765 u32 bmcr, orig_bmcr;
3767 tp->link_config.active_speed = tp->link_config.speed;
3768 tp->link_config.active_duplex = tp->link_config.duplex;
3771 switch (tp->link_config.speed) {
3777 bmcr |= BMCR_SPEED100;
3781 bmcr |= BMCR_SPEED1000;
3785 if (tp->link_config.duplex == DUPLEX_FULL)
3786 bmcr |= BMCR_FULLDPLX;
3788 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
3789 (bmcr != orig_bmcr)) {
/* Loop back and wait for link-down before forcing the new mode. */
3790 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
3791 for (i = 0; i < 1500; i++) {
3795 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
3796 tg3_readphy(tp, MII_BMSR, &tmp))
3798 if (!(tmp & BMSR_LSTATUS)) {
3803 tg3_writephy(tp, MII_BMCR, bmcr);
3807 tg3_writephy(tp, MII_BMCR,
3808 BMCR_ANENABLE | BMCR_ANRESTART);
3812 static int tg3_init_5401phy_dsp(struct tg3 *tp)
3816 /* Turn off tap power management. */
3817 /* Set Extended packet length bit */
3818 err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
3820 err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
3821 err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
3822 err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
3823 err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
3824 err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
3831 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
3833 u32 adv_reg, all_mask = 0;
3835 if (mask & ADVERTISED_10baseT_Half)
3836 all_mask |= ADVERTISE_10HALF;
3837 if (mask & ADVERTISED_10baseT_Full)
3838 all_mask |= ADVERTISE_10FULL;
3839 if (mask & ADVERTISED_100baseT_Half)
3840 all_mask |= ADVERTISE_100HALF;
3841 if (mask & ADVERTISED_100baseT_Full)
3842 all_mask |= ADVERTISE_100FULL;
3844 if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
3847 if ((adv_reg & ADVERTISE_ALL) != all_mask)
3850 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3854 if (mask & ADVERTISED_1000baseT_Half)
3855 all_mask |= ADVERTISE_1000HALF;
3856 if (mask & ADVERTISED_1000baseT_Full)
3857 all_mask |= ADVERTISE_1000FULL;
3859 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
3862 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
3863 if (tg3_ctrl != all_mask)
/* Verify that the local pause advertisement matches the requested
 * flow-control configuration.  Reads MII_ADVERTISE into *@lcladv and
 * compares its pause bits against what tp->link_config.flowctrl
 * requires.  On a full-duplex link with a mismatch, the caller is
 * told to renegotiate; otherwise the link-partner pause bits are
 * fetched into *@rmtadv (when PAUSE_AUTONEG) and, on mismatch, the
 * advertisement register is proactively rewritten so a future
 * renegotiation starts from the correct value.
 * NOTE(review): return statements and closing braces are omitted
 * from this listing.
 */
3870 static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
3874 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
3877 curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
3878 reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
3880 if (tp->link_config.active_duplex == DUPLEX_FULL) {
3881 if (curadv != reqadv)
3884 if (tg3_flag(tp, PAUSE_AUTONEG))
3885 tg3_readphy(tp, MII_LPA, rmtadv);
3887 /* Reprogram the advertisement register, even if it
3888 * does not affect the current link. If the link
3889 * gets renegotiated in the future, we can save an
3890 * additional renegotiation cycle by advertising
3891 * it correctly in the first place.
3893 if (curadv != reqadv) {
3894 *lcladv &= ~(ADVERTISE_PAUSE_CAP |
3895 ADVERTISE_PAUSE_ASYM);
3896 tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
3903 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
3905 int current_link_up;
3907 u32 lcl_adv, rmt_adv;
3915 (MAC_STATUS_SYNC_CHANGED |
3916 MAC_STATUS_CFG_CHANGED |
3917 MAC_STATUS_MI_COMPLETION |
3918 MAC_STATUS_LNKSTATE_CHANGED));
3921 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
3923 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
3927 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
3929 /* Some third-party PHYs need to be reset on link going
3932 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3933 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
3934 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
3935 netif_carrier_ok(tp->dev)) {
3936 tg3_readphy(tp, MII_BMSR, &bmsr);
3937 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3938 !(bmsr & BMSR_LSTATUS))
3944 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
3945 tg3_readphy(tp, MII_BMSR, &bmsr);
3946 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
3947 !tg3_flag(tp, INIT_COMPLETE))
3950 if (!(bmsr & BMSR_LSTATUS)) {
3951 err = tg3_init_5401phy_dsp(tp);
3955 tg3_readphy(tp, MII_BMSR, &bmsr);
3956 for (i = 0; i < 1000; i++) {
3958 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3959 (bmsr & BMSR_LSTATUS)) {
3965 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
3966 TG3_PHY_REV_BCM5401_B0 &&
3967 !(bmsr & BMSR_LSTATUS) &&
3968 tp->link_config.active_speed == SPEED_1000) {
3969 err = tg3_phy_reset(tp);
3971 err = tg3_init_5401phy_dsp(tp);
3976 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3977 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
3978 /* 5701 {A0,B0} CRC bug workaround */
3979 tg3_writephy(tp, 0x15, 0x0a75);
3980 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3981 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
3982 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3985 /* Clear pending interrupts... */
3986 tg3_readphy(tp, MII_TG3_ISTAT, &val);
3987 tg3_readphy(tp, MII_TG3_ISTAT, &val);
3989 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
3990 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
3991 else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
3992 tg3_writephy(tp, MII_TG3_IMASK, ~0);
3994 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3995 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3996 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
3997 tg3_writephy(tp, MII_TG3_EXT_CTRL,
3998 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4000 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4003 current_link_up = 0;
4004 current_speed = SPEED_INVALID;
4005 current_duplex = DUPLEX_INVALID;
4007 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4008 err = tg3_phy_auxctl_read(tp,
4009 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4011 if (!err && !(val & (1 << 10))) {
4012 tg3_phy_auxctl_write(tp,
4013 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4020 for (i = 0; i < 100; i++) {
4021 tg3_readphy(tp, MII_BMSR, &bmsr);
4022 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4023 (bmsr & BMSR_LSTATUS))
4028 if (bmsr & BMSR_LSTATUS) {
4031 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4032 for (i = 0; i < 2000; i++) {
4034 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4039 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4044 for (i = 0; i < 200; i++) {
4045 tg3_readphy(tp, MII_BMCR, &bmcr);
4046 if (tg3_readphy(tp, MII_BMCR, &bmcr))
4048 if (bmcr && bmcr != 0x7fff)
4056 tp->link_config.active_speed = current_speed;
4057 tp->link_config.active_duplex = current_duplex;
4059 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4060 if ((bmcr & BMCR_ANENABLE) &&
4061 tg3_copper_is_advertising_all(tp,
4062 tp->link_config.advertising)) {
4063 if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
4065 current_link_up = 1;
4068 if (!(bmcr & BMCR_ANENABLE) &&
4069 tp->link_config.speed == current_speed &&
4070 tp->link_config.duplex == current_duplex &&
4071 tp->link_config.flowctrl ==
4072 tp->link_config.active_flowctrl) {
4073 current_link_up = 1;
4077 if (current_link_up == 1 &&
4078 tp->link_config.active_duplex == DUPLEX_FULL)
4079 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4083 if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4084 tg3_phy_copper_begin(tp);
4086 tg3_readphy(tp, MII_BMSR, &bmsr);
4087 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4088 (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4089 current_link_up = 1;
4092 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4093 if (current_link_up == 1) {
4094 if (tp->link_config.active_speed == SPEED_100 ||
4095 tp->link_config.active_speed == SPEED_10)
4096 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4098 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4099 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4100 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4102 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4104 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4105 if (tp->link_config.active_duplex == DUPLEX_HALF)
4106 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4108 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
4109 if (current_link_up == 1 &&
4110 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
4111 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
4113 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
4116 /* ??? Without this setting Netgear GA302T PHY does not
4117 * ??? send/receive packets...
4119 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
4120 tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
4121 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
4122 tw32_f(MAC_MI_MODE, tp->mi_mode);
4126 tw32_f(MAC_MODE, tp->mac_mode);
4129 tg3_phy_eee_adjust(tp, current_link_up);
4131 if (tg3_flag(tp, USE_LINKCHG_REG)) {
4132 /* Polled via timer. */
4133 tw32_f(MAC_EVENT, 0);
4135 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4139 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
4140 current_link_up == 1 &&
4141 tp->link_config.active_speed == SPEED_1000 &&
4142 (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
4145 (MAC_STATUS_SYNC_CHANGED |
4146 MAC_STATUS_CFG_CHANGED));
4149 NIC_SRAM_FIRMWARE_MBOX,
4150 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
4153 /* Prevent send BD corruption. */
4154 if (tg3_flag(tp, CLKREQ_BUG)) {
4155 u16 oldlnkctl, newlnkctl;
4157 pci_read_config_word(tp->pdev,
4158 pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
4160 if (tp->link_config.active_speed == SPEED_100 ||
4161 tp->link_config.active_speed == SPEED_10)
4162 newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
4164 newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
4165 if (newlnkctl != oldlnkctl)
4166 pci_write_config_word(tp->pdev,
4167 pci_pcie_cap(tp->pdev) +
4168 PCI_EXP_LNKCTL, newlnkctl);
4171 if (current_link_up != netif_carrier_ok(tp->dev)) {
4172 if (current_link_up)
4173 netif_carrier_on(tp->dev);
4175 netif_carrier_off(tp->dev);
4176 tg3_link_report(tp);
/* State for the software auto-negotiation state machine used on the
 * fiber (TBI) interface; driven by tg3_fiber_aneg_smachine() below.
 * The ANEG_STATE_* values enumerate the machine's states, the MR_*
 * bits are status/ability flags kept in ->flags, and the ANEG_CFG_*
 * bits decode the 16-bit config words exchanged on the wire.
 */
4182 struct tg3_fiber_aneginfo {
4184 #define ANEG_STATE_UNKNOWN		0
4185 #define ANEG_STATE_AN_ENABLE		1
4186 #define ANEG_STATE_RESTART_INIT		2
4187 #define ANEG_STATE_RESTART		3
4188 #define ANEG_STATE_DISABLE_LINK_OK	4
4189 #define ANEG_STATE_ABILITY_DETECT_INIT	5
4190 #define ANEG_STATE_ABILITY_DETECT	6
4191 #define ANEG_STATE_ACK_DETECT_INIT	7
4192 #define ANEG_STATE_ACK_DETECT		8
4193 #define ANEG_STATE_COMPLETE_ACK_INIT	9
4194 #define ANEG_STATE_COMPLETE_ACK		10
4195 #define ANEG_STATE_IDLE_DETECT_INIT	11
4196 #define ANEG_STATE_IDLE_DETECT		12
4197 #define ANEG_STATE_LINK_OK		13
4198 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT	14
4199 #define ANEG_STATE_NEXT_PAGE_WAIT	15
/* Status / ability flags kept in ->flags.  MR_LP_ADV_* mirror what the
 * link partner advertised in its received config word. */
4202 #define MR_AN_ENABLE		0x00000001
4203 #define MR_RESTART_AN		0x00000002
4204 #define MR_AN_COMPLETE		0x00000004
4205 #define MR_PAGE_RX		0x00000008
4206 #define MR_NP_LOADED		0x00000010
4207 #define MR_TOGGLE_TX		0x00000020
4208 #define MR_LP_ADV_FULL_DUPLEX	0x00000040
4209 #define MR_LP_ADV_HALF_DUPLEX	0x00000080
4210 #define MR_LP_ADV_SYM_PAUSE	0x00000100
4211 #define MR_LP_ADV_ASYM_PAUSE	0x00000200
4212 #define MR_LP_ADV_REMOTE_FAULT1	0x00000400
4213 #define MR_LP_ADV_REMOTE_FAULT2	0x00000800
4214 #define MR_LP_ADV_NEXT_PAGE	0x00001000
4215 #define MR_TOGGLE_RX		0x00002000
4216 #define MR_NP_RX		0x00004000
4218 #define MR_LINK_OK		0x80000000
/* Timestamps (in state-machine ticks) used for settle-time checks. */
4220 	unsigned long link_time, cur_time;
/* Last config word seen, and how many consecutive times it matched. */
4222 	u32 ability_match_cfg;
4223 	int ability_match_count;
4225 	char ability_match, idle_match, ack_match;
/* Raw config words: what we transmit and what we last received. */
4227 	u32 txconfig, rxconfig;
/* Bit layout of the on-wire config word. */
4228 #define ANEG_CFG_NP		0x00000080
4229 #define ANEG_CFG_ACK		0x00000040
4230 #define ANEG_CFG_RF2		0x00000020
4231 #define ANEG_CFG_RF1		0x00000010
4232 #define ANEG_CFG_PS2		0x00000001
4233 #define ANEG_CFG_PS1		0x00008000
4234 #define ANEG_CFG_HD		0x00004000
4235 #define ANEG_CFG_FD		0x00002000
4236 #define ANEG_CFG_INVAL		0x00001f06
/* State-machine return codes and the settle timeout (in ticks). */
4241 #define ANEG_TIMER_ENAB		2
4242 #define ANEG_FAILED		-1
4244 #define ANEG_STATE_SETTLE_TIME	10000
/* Single step of the software fiber auto-negotiation state machine.
 *
 * Samples the received config word from MAC_RX_AUTO_NEG, updates the
 * match/ack tracking in @ap, then advances @ap->state.  Called repeatedly
 * from fiber_autoneg() until it reports done or failure.
 *
 * NOTE(review): return value is one of the ANEG_* codes (ANEG_TIMER_ENAB,
 * ANEG_FAILED, presumably ANEG_DONE/ANEG_OK on elided lines) — confirm
 * against the full source.
 */
4246 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
4247 				   struct tg3_fiber_aneginfo *ap)
4250 	unsigned long delta;
/* Fresh machine: clear all match tracking before the first step. */
4254 	if (ap->state == ANEG_STATE_UNKNOWN) {
4258 		ap->ability_match_cfg = 0;
4259 		ap->ability_match_count = 0;
4260 		ap->ability_match = 0;
/* Sample the incoming config word; "ability match" requires seeing the
 * same non-zero word more than once in a row. */
4266 	if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
4267 		rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
4269 		if (rx_cfg_reg != ap->ability_match_cfg) {
4270 			ap->ability_match_cfg = rx_cfg_reg;
4271 			ap->ability_match = 0;
4272 			ap->ability_match_count = 0;
4274 			if (++ap->ability_match_count > 1) {
4275 				ap->ability_match = 1;
4276 				ap->ability_match_cfg = rx_cfg_reg;
4279 		if (rx_cfg_reg & ANEG_CFG_ACK)
/* No config word received: reset all match tracking. */
4287 		ap->ability_match_cfg = 0;
4288 		ap->ability_match_count = 0;
4289 		ap->ability_match = 0;
4295 	ap->rxconfig = rx_cfg_reg;
4298 	switch (ap->state) {
4299 	case ANEG_STATE_UNKNOWN:
4300 		if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
4301 			ap->state = ANEG_STATE_AN_ENABLE;
4304 	case ANEG_STATE_AN_ENABLE:
4305 		ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
4306 		if (ap->flags & MR_AN_ENABLE) {
4309 			ap->ability_match_cfg = 0;
4310 			ap->ability_match_count = 0;
4311 			ap->ability_match = 0;
4315 			ap->state = ANEG_STATE_RESTART_INIT;
4317 			ap->state = ANEG_STATE_DISABLE_LINK_OK;
/* Restart: transmit an all-zero config word, then wait for the
 * settle time before probing link-partner abilities. */
4321 	case ANEG_STATE_RESTART_INIT:
4322 		ap->link_time = ap->cur_time;
4323 		ap->flags &= ~(MR_NP_LOADED);
4325 		tw32(MAC_TX_AUTO_NEG, 0);
4326 		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4327 		tw32_f(MAC_MODE, tp->mac_mode);
4330 		ret = ANEG_TIMER_ENAB;
4331 		ap->state = ANEG_STATE_RESTART;
4334 	case ANEG_STATE_RESTART:
4335 		delta = ap->cur_time - ap->link_time;
4336 		if (delta > ANEG_STATE_SETTLE_TIME)
4337 			ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
4339 			ret = ANEG_TIMER_ENAB;
4342 	case ANEG_STATE_DISABLE_LINK_OK:
/* Advertise our abilities (full duplex + flow control bits). */
4346 	case ANEG_STATE_ABILITY_DETECT_INIT:
4347 		ap->flags &= ~(MR_TOGGLE_TX);
4348 		ap->txconfig = ANEG_CFG_FD;
4349 		flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4350 		if (flowctrl & ADVERTISE_1000XPAUSE)
4351 			ap->txconfig |= ANEG_CFG_PS1;
4352 		if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4353 			ap->txconfig |= ANEG_CFG_PS2;
4354 		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4355 		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4356 		tw32_f(MAC_MODE, tp->mac_mode);
4359 		ap->state = ANEG_STATE_ABILITY_DETECT;
4362 	case ANEG_STATE_ABILITY_DETECT:
4363 		if (ap->ability_match != 0 && ap->rxconfig != 0)
4364 			ap->state = ANEG_STATE_ACK_DETECT_INIT;
/* Partner's word is stable: add the ACK bit to our transmission. */
4367 	case ANEG_STATE_ACK_DETECT_INIT:
4368 		ap->txconfig |= ANEG_CFG_ACK;
4369 		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4370 		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4371 		tw32_f(MAC_MODE, tp->mac_mode);
4374 		ap->state = ANEG_STATE_ACK_DETECT;
4377 	case ANEG_STATE_ACK_DETECT:
4378 		if (ap->ack_match != 0) {
4379 			if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
4380 			    (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
4381 				ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
4383 				ap->state = ANEG_STATE_AN_ENABLE;
4385 		} else if (ap->ability_match != 0 &&
4386 			   ap->rxconfig == 0) {
4387 			ap->state = ANEG_STATE_AN_ENABLE;
/* Decode the link partner's advertised abilities into MR_LP_ADV_* flags. */
4391 	case ANEG_STATE_COMPLETE_ACK_INIT:
4392 		if (ap->rxconfig & ANEG_CFG_INVAL) {
4396 		ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
4397 			       MR_LP_ADV_HALF_DUPLEX |
4398 			       MR_LP_ADV_SYM_PAUSE |
4399 			       MR_LP_ADV_ASYM_PAUSE |
4400 			       MR_LP_ADV_REMOTE_FAULT1 |
4401 			       MR_LP_ADV_REMOTE_FAULT2 |
4402 			       MR_LP_ADV_NEXT_PAGE |
4405 		if (ap->rxconfig & ANEG_CFG_FD)
4406 			ap->flags |= MR_LP_ADV_FULL_DUPLEX;
4407 		if (ap->rxconfig & ANEG_CFG_HD)
4408 			ap->flags |= MR_LP_ADV_HALF_DUPLEX;
4409 		if (ap->rxconfig & ANEG_CFG_PS1)
4410 			ap->flags |= MR_LP_ADV_SYM_PAUSE;
4411 		if (ap->rxconfig & ANEG_CFG_PS2)
4412 			ap->flags |= MR_LP_ADV_ASYM_PAUSE;
4413 		if (ap->rxconfig & ANEG_CFG_RF1)
4414 			ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
4415 		if (ap->rxconfig & ANEG_CFG_RF2)
4416 			ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
4417 		if (ap->rxconfig & ANEG_CFG_NP)
4418 			ap->flags |= MR_LP_ADV_NEXT_PAGE;
4420 		ap->link_time = ap->cur_time;
4422 		ap->flags ^= (MR_TOGGLE_TX);
4423 		if (ap->rxconfig & 0x0008)
4424 			ap->flags |= MR_TOGGLE_RX;
4425 		if (ap->rxconfig & ANEG_CFG_NP)
4426 			ap->flags |= MR_NP_RX;
4427 		ap->flags |= MR_PAGE_RX;
4429 		ap->state = ANEG_STATE_COMPLETE_ACK;
4430 		ret = ANEG_TIMER_ENAB;
4433 	case ANEG_STATE_COMPLETE_ACK:
/* Partner dropped its word: renegotiate from scratch. */
4434 		if (ap->ability_match != 0 &&
4435 		    ap->rxconfig == 0) {
4436 			ap->state = ANEG_STATE_AN_ENABLE;
4439 		delta = ap->cur_time - ap->link_time;
4440 		if (delta > ANEG_STATE_SETTLE_TIME) {
4441 			if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
4442 				ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4444 				if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
4445 				    !(ap->flags & MR_NP_RX)) {
4446 					ap->state = ANEG_STATE_IDLE_DETECT_INIT;
/* Done exchanging config words: stop sending them and wait for idle. */
4454 	case ANEG_STATE_IDLE_DETECT_INIT:
4455 		ap->link_time = ap->cur_time;
4456 		tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4457 		tw32_f(MAC_MODE, tp->mac_mode);
4460 		ap->state = ANEG_STATE_IDLE_DETECT;
4461 		ret = ANEG_TIMER_ENAB;
4464 	case ANEG_STATE_IDLE_DETECT:
4465 		if (ap->ability_match != 0 &&
4466 		    ap->rxconfig == 0) {
4467 			ap->state = ANEG_STATE_AN_ENABLE;
4470 		delta = ap->cur_time - ap->link_time;
4471 		if (delta > ANEG_STATE_SETTLE_TIME) {
4472 			/* XXX another gem from the Broadcom driver :( */
4473 			ap->state = ANEG_STATE_LINK_OK;
4477 	case ANEG_STATE_LINK_OK:
4478 		ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
4482 	case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
4483 		/* ??? unimplemented */
4486 	case ANEG_STATE_NEXT_PAGE_WAIT:
4487 		/* ??? unimplemented */
/* Run the software fiber auto-negotiation to completion.
 *
 * Drives tg3_fiber_aneg_smachine() for up to ~195000 ticks, then reports
 * the config word we transmitted (*txflags) and the resulting MR_* flags
 * (*rxflags).  Returns nonzero on a successful negotiation (ANEG_DONE
 * with AN complete, link OK and full-duplex partner), 0 otherwise —
 * NOTE(review): the success return statement itself is on elided lines;
 * confirm against the full source.
 */
4498 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
4501 	struct tg3_fiber_aneginfo aninfo;
4502 	int status = ANEG_FAILED;
/* Quiesce the TX config word and force GMII port mode while negotiating. */
4506 	tw32_f(MAC_TX_AUTO_NEG, 0);
4508 	tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
4509 	tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
4512 	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
4515 	memset(&aninfo, 0, sizeof(aninfo));
4516 	aninfo.flags |= MR_AN_ENABLE;
4517 	aninfo.state = ANEG_STATE_UNKNOWN;
4518 	aninfo.cur_time = 0;
/* Step the state machine until it finishes or the tick budget runs out. */
4520 	while (++tick < 195000) {
4521 		status = tg3_fiber_aneg_smachine(tp, &aninfo);
4522 		if (status == ANEG_DONE || status == ANEG_FAILED)
/* Stop sending config words regardless of outcome. */
4528 	tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4529 	tw32_f(MAC_MODE, tp->mac_mode);
4532 	*txflags = aninfo.txconfig;
4533 	*rxflags = aninfo.flags;
4535 	if (status == ANEG_DONE &&
4536 	    (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
4537 			     MR_LP_ADV_FULL_DUPLEX)))
/* One-time initialization sequence for the BCM8002 SerDes PHY.
 *
 * The writes below use undocumented vendor register numbers/values; the
 * inline comments preserved from the original describe each step.  Do not
 * reorder — the sequence (reset, config-mode select, POR toggle, settle
 * waits) is timing-sensitive hardware bring-up.
 */
4543 static void tg3_init_bcm8002(struct tg3 *tp)
4545 	u32 mac_status = tr32(MAC_STATUS);
4548 	/* Reset when initting first time or we have a link. */
4549 	if (tg3_flag(tp, INIT_COMPLETE) &&
4550 	    !(mac_status & MAC_STATUS_PCS_SYNCED))
4553 	/* Set PLL lock range. */
4554 	tg3_writephy(tp, 0x16, 0x8007);
4557 	tg3_writephy(tp, MII_BMCR, BMCR_RESET);
4559 	/* Wait for reset to complete. */
4560 	/* XXX schedule_timeout() ... */
4561 	for (i = 0; i < 500; i++)
4564 	/* Config mode; select PMA/Ch 1 regs. */
4565 	tg3_writephy(tp, 0x10, 0x8411);
4567 	/* Enable auto-lock and comdet, select txclk for tx. */
4568 	tg3_writephy(tp, 0x11, 0x0a10);
4570 	tg3_writephy(tp, 0x18, 0x00a0);
4571 	tg3_writephy(tp, 0x16, 0x41ff);
4573 	/* Assert and deassert POR. */
4574 	tg3_writephy(tp, 0x13, 0x0400);
4576 	tg3_writephy(tp, 0x13, 0x0000);
4578 	tg3_writephy(tp, 0x11, 0x0a50);
4580 	tg3_writephy(tp, 0x11, 0x0a10);
4582 	/* Wait for signal to stabilize */
4583 	/* XXX schedule_timeout() ... */
4584 	for (i = 0; i < 15000; i++)
4587 	/* Deselect the channel register so we can read the PHYID
4590 	tg3_writephy(tp, 0x10, 0x8011);
/* Fiber link setup using the chip's hardware SG-DIG auto-negotiation.
 *
 * Programs SG_DIG_CTRL for either forced mode or HW autoneg (including
 * flow-control advertisement bits), handles the 5704 dual-MAC serdes
 * workaround, and falls back to parallel detection when the partner
 * sends no config words.  Returns 1 if the link is up, 0 otherwise.
 */
4593 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
4596 	u32 sg_dig_ctrl, sg_dig_status;
4597 	u32 serdes_cfg, expected_sg_dig_ctrl;
4598 	int workaround, port_a;
4599 	int current_link_up;
4602 	expected_sg_dig_ctrl = 0;
4605 	current_link_up = 0;
/* 5704 A0/A1 need no workaround; otherwise detect which MAC port we are
 * and preserve the serdes analog config bits. */
4607 	if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
4608 	    tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
4610 		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
4613 		/* preserve bits 0-11,13,14 for signal pre-emphasis */
4614 		/* preserve bits 20-23 for voltage regulator */
4615 		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
4618 	sg_dig_ctrl = tr32(SG_DIG_CTRL);
/* Forced mode: disable HW autoneg if it was on; link is up as soon as
 * the PCS is synced. */
4620 	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
4621 		if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
4623 			u32 val = serdes_cfg;
4629 			tw32_f(MAC_SERDES_CFG, val);
4632 			tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4634 		if (mac_status & MAC_STATUS_PCS_SYNCED) {
4635 			tg3_setup_flow_control(tp, 0, 0);
4636 			current_link_up = 1;
4641 	/* Want auto-negotiation. */
4642 	expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
4644 	flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4645 	if (flowctrl & ADVERTISE_1000XPAUSE)
4646 		expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
4647 	if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4648 		expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
/* SG_DIG_CTRL is not what we want: (re)start HW autoneg via soft reset,
 * unless parallel detect already has a synced, config-free link. */
4650 	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
4651 		if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
4652 		    tp->serdes_counter &&
4653 		    ((mac_status & (MAC_STATUS_PCS_SYNCED |
4654 				    MAC_STATUS_RCVD_CFG)) ==
4655 		     MAC_STATUS_PCS_SYNCED)) {
4656 			tp->serdes_counter--;
4657 			current_link_up = 1;
4662 			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
4663 		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
4665 		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
4667 		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4668 		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4669 	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
4670 				 MAC_STATUS_SIGNAL_DET)) {
4671 		sg_dig_status = tr32(SG_DIG_STATUS);
4672 		mac_status = tr32(MAC_STATUS);
/* HW autoneg finished: translate SG-DIG pause bits into the generic
 * local/remote advertisement and resolve flow control. */
4674 		if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
4675 		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
4676 			u32 local_adv = 0, remote_adv = 0;
4678 			if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
4679 				local_adv |= ADVERTISE_1000XPAUSE;
4680 			if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
4681 				local_adv |= ADVERTISE_1000XPSE_ASYM;
4683 			if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
4684 				remote_adv |= LPA_1000XPAUSE;
4685 			if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
4686 				remote_adv |= LPA_1000XPAUSE_ASYM;
4688 			tg3_setup_flow_control(tp, local_adv, remote_adv);
4689 			current_link_up = 1;
4690 			tp->serdes_counter = 0;
4691 			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
/* Autoneg did not complete: count down, then try parallel detection. */
4692 		} else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
4693 			if (tp->serdes_counter)
4694 				tp->serdes_counter--;
4697 					u32 val = serdes_cfg;
4704 					tw32_f(MAC_SERDES_CFG, val);
4707 				tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4710 				/* Link parallel detection - link is up */
4711 				/* only if we have PCS_SYNC and not */
4712 				/* receiving config code words */
4713 				mac_status = tr32(MAC_STATUS);
4714 				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
4715 				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
4716 					tg3_setup_flow_control(tp, 0, 0);
4717 					current_link_up = 1;
4719 						TG3_PHYFLG_PARALLEL_DETECT;
4720 					tp->serdes_counter =
4721 						SERDES_PARALLEL_DET_TIMEOUT;
4723 					goto restart_autoneg;
4727 		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4728 		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4732 	return current_link_up;
/* Fiber link setup using the software autoneg state machine (no SG-DIG
 * hardware autoneg).  Runs fiber_autoneg() when autoneg is enabled,
 * otherwise forces a 1000FD link.  Returns 1 if the link is up, 0 if not.
 */
4735 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
4737 	int current_link_up = 0;
/* Nothing to do without PCS sync. */
4739 	if (!(mac_status & MAC_STATUS_PCS_SYNCED))
4742 	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4743 		u32 txflags, rxflags;
4746 		if (fiber_autoneg(tp, &txflags, &rxflags)) {
4747 			u32 local_adv = 0, remote_adv = 0;
/* Map the negotiated config-word pause bits onto the generic
 * advertisement encoding for flow-control resolution. */
4749 			if (txflags & ANEG_CFG_PS1)
4750 				local_adv |= ADVERTISE_1000XPAUSE;
4751 			if (txflags & ANEG_CFG_PS2)
4752 				local_adv |= ADVERTISE_1000XPSE_ASYM;
4754 			if (rxflags & MR_LP_ADV_SYM_PAUSE)
4755 				remote_adv |= LPA_1000XPAUSE;
4756 			if (rxflags & MR_LP_ADV_ASYM_PAUSE)
4757 				remote_adv |= LPA_1000XPAUSE_ASYM;
4759 			tg3_setup_flow_control(tp, local_adv, remote_adv);
4761 			current_link_up = 1;
/* Let the sync/config-changed status bits settle. */
4763 		for (i = 0; i < 30; i++) {
4766 			     (MAC_STATUS_SYNC_CHANGED |
4767 			      MAC_STATUS_CFG_CHANGED));
4769 			if ((tr32(MAC_STATUS) &
4770 			     (MAC_STATUS_SYNC_CHANGED |
4771 			      MAC_STATUS_CFG_CHANGED)) == 0)
/* Autoneg failed but PCS is synced and no config words are arriving:
 * treat as link up via parallel detection. */
4775 		mac_status = tr32(MAC_STATUS);
4776 		if (current_link_up == 0 &&
4777 		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
4778 		    !(mac_status & MAC_STATUS_RCVD_CFG))
4779 			current_link_up = 1;
4781 		tg3_setup_flow_control(tp, 0, 0);
4783 		/* Forcing 1000FD link up. */
4784 		current_link_up = 1;
4786 		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
4789 		tw32_f(MAC_MODE, tp->mac_mode);
4794 	return current_link_up;
/* Top-level link setup for the TBI/fiber interface.
 *
 * Selects hardware (SG-DIG) or software autoneg, programs MAC port mode
 * TBI, updates the LED controls for the resulting state, and propagates
 * carrier changes (or pause/speed/duplex changes) via tg3_link_report().
 *
 * NOTE(review): return value lines are elided here — presumably returns 0;
 * confirm against the full source.
 */
4797 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
4800 	u16 orig_active_speed;
4801 	u8 orig_active_duplex;
4803 	int current_link_up;
/* Remember the pre-setup state so we can report changes at the end. */
4806 	orig_pause_cfg = tp->link_config.active_flowctrl;
4807 	orig_active_speed = tp->link_config.active_speed;
4808 	orig_active_duplex = tp->link_config.active_duplex;
/* SW-autoneg fast path: if carrier is already up and MAC status shows a
 * clean synced link, just ack the change bits. */
4810 	if (!tg3_flag(tp, HW_AUTONEG) &&
4811 	    netif_carrier_ok(tp->dev) &&
4812 	    tg3_flag(tp, INIT_COMPLETE)) {
4813 		mac_status = tr32(MAC_STATUS);
4814 		mac_status &= (MAC_STATUS_PCS_SYNCED |
4815 			       MAC_STATUS_SIGNAL_DET |
4816 			       MAC_STATUS_CFG_CHANGED |
4817 			       MAC_STATUS_RCVD_CFG);
4818 		if (mac_status == (MAC_STATUS_PCS_SYNCED |
4819 				   MAC_STATUS_SIGNAL_DET)) {
4820 			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4821 					    MAC_STATUS_CFG_CHANGED));
4826 	tw32_f(MAC_TX_AUTO_NEG, 0);
/* Force the MAC into TBI port mode for the fiber interface. */
4828 	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
4829 	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
4830 	tw32_f(MAC_MODE, tp->mac_mode);
4833 	if (tp->phy_id == TG3_PHY_ID_BCM8002)
4834 		tg3_init_bcm8002(tp);
4836 	/* Enable link change event even when serdes polling. */
4837 	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4840 	current_link_up = 0;
4841 	mac_status = tr32(MAC_STATUS);
4843 	if (tg3_flag(tp, HW_AUTONEG))
4844 		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
4846 		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
/* Mark the status block updated without a link-change indication. */
4848 	tp->napi[0].hw_status->status =
4849 		(SD_STATUS_UPDATED |
4850 		 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
/* Drain any residual sync/config/link-change status bits. */
4852 	for (i = 0; i < 100; i++) {
4853 		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4854 				    MAC_STATUS_CFG_CHANGED));
4856 		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
4857 					 MAC_STATUS_CFG_CHANGED |
4858 					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
/* Lost PCS sync: if autoneg is enabled and its timer expired, pulse
 * SEND_CONFIGS to restart negotiation. */
4862 	mac_status = tr32(MAC_STATUS);
4863 	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
4864 		current_link_up = 0;
4865 		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
4866 		    tp->serdes_counter == 0) {
4867 			tw32_f(MAC_MODE, (tp->mac_mode |
4868 					  MAC_MODE_SEND_CONFIGS));
4870 			tw32_f(MAC_MODE, tp->mac_mode);
/* Fiber links are always 1000FD when up; drive the LEDs accordingly. */
4874 	if (current_link_up == 1) {
4875 		tp->link_config.active_speed = SPEED_1000;
4876 		tp->link_config.active_duplex = DUPLEX_FULL;
4877 		tw32(MAC_LED_CTRL, (tp->led_ctrl |
4878 				    LED_CTRL_LNKLED_OVERRIDE |
4879 				    LED_CTRL_1000MBPS_ON));
4881 		tp->link_config.active_speed = SPEED_INVALID;
4882 		tp->link_config.active_duplex = DUPLEX_INVALID;
4883 		tw32(MAC_LED_CTRL, (tp->led_ctrl |
4884 				    LED_CTRL_LNKLED_OVERRIDE |
4885 				    LED_CTRL_TRAFFIC_OVERRIDE));
/* Report carrier transitions, or any pause/speed/duplex change even if
 * the carrier state itself did not change. */
4888 	if (current_link_up != netif_carrier_ok(tp->dev)) {
4889 		if (current_link_up)
4890 			netif_carrier_on(tp->dev);
4892 			netif_carrier_off(tp->dev);
4893 		tg3_link_report(tp);
4895 		u32 now_pause_cfg = tp->link_config.active_flowctrl;
4896 		if (orig_pause_cfg != now_pause_cfg ||
4897 		    orig_active_speed != tp->link_config.active_speed ||
4898 		    orig_active_duplex != tp->link_config.active_duplex)
4899 			tg3_link_report(tp);
/* Link setup for fiber attachments that present an MII-style PHY
 * interface (e.g. 5714 class SerDes).
 *
 * Handles both autoneg (programming the 1000X advertisement and
 * restarting AN) and forced mode (rewriting BMCR, with a forced link-down
 * transition first), then resolves speed/duplex/flow-control and updates
 * carrier state.  Returns the accumulated err from the PHY accesses —
 * NOTE(review): the final return is on elided lines; confirm.
 */
4905 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
4907 	int current_link_up, err = 0;
4911 	u32 local_adv, remote_adv;
4913 	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4914 	tw32_f(MAC_MODE, tp->mac_mode);
/* Ack latched MAC status change bits. */
4920 	(MAC_STATUS_SYNC_CHANGED |
4921 	MAC_STATUS_CFG_CHANGED |
4922 	MAC_STATUS_MI_COMPLETION |
4923 	MAC_STATUS_LNKSTATE_CHANGED));
4929 	current_link_up = 0;
4930 	current_speed = SPEED_INVALID;
4931 	current_duplex = DUPLEX_INVALID;
/* Double-read BMSR (latched); on 5714 the MAC TX status is the
 * authoritative link indication, so override BMSR_LSTATUS from it. */
4933 	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4934 	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4935 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
4936 		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4937 			bmsr |= BMSR_LSTATUS;
4939 			bmsr &= ~BMSR_LSTATUS;
4942 	err |= tg3_readphy(tp, MII_BMCR, &bmcr);
4944 	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
4945 	    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
4946 		/* do nothing, just check for link up at the end */
4947 	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
/* Build the 1000X advertisement from the configured modes and
 * flow-control settings; restart AN only if something changed. */
4950 		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4951 		new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
4952 				  ADVERTISE_1000XPAUSE |
4953 				  ADVERTISE_1000XPSE_ASYM |
4956 		new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4958 		if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
4959 			new_adv |= ADVERTISE_1000XHALF;
4960 		if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
4961 			new_adv |= ADVERTISE_1000XFULL;
4963 		if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
4964 			tg3_writephy(tp, MII_ADVERTISE, new_adv);
4965 			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
4966 			tg3_writephy(tp, MII_BMCR, bmcr);
4968 			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4969 			tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
4970 			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
/* Forced mode: compute the desired BMCR (no AN, duplex per config). */
4977 		bmcr &= ~BMCR_SPEED1000;
4978 		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
4980 		if (tp->link_config.duplex == DUPLEX_FULL)
4981 			new_bmcr |= BMCR_FULLDPLX;
4983 		if (new_bmcr != bmcr) {
4984 			/* BMCR_SPEED1000 is a reserved bit that needs
4985 			 * to be set on write.
4987 			new_bmcr |= BMCR_SPEED1000;
4989 			/* Force a linkdown */
4990 			if (netif_carrier_ok(tp->dev)) {
/* Clear our advertisement and restart AN so the partner drops link
 * before we switch to the forced settings. */
4993 				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4994 				adv &= ~(ADVERTISE_1000XFULL |
4995 					 ADVERTISE_1000XHALF |
4997 				tg3_writephy(tp, MII_ADVERTISE, adv);
4998 				tg3_writephy(tp, MII_BMCR, bmcr |
5002 				netif_carrier_off(tp->dev);
5004 			tg3_writephy(tp, MII_BMCR, new_bmcr);
5006 			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5007 			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5008 			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
5010 				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5011 					bmsr |= BMSR_LSTATUS;
5013 					bmsr &= ~BMSR_LSTATUS;
5015 			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
/* Resolve the final speed/duplex.  Fiber MII links are 1000 Mb/s. */
5019 	if (bmsr & BMSR_LSTATUS) {
5020 		current_speed = SPEED_1000;
5021 		current_link_up = 1;
5022 		if (bmcr & BMCR_FULLDPLX)
5023 			current_duplex = DUPLEX_FULL;
5025 			current_duplex = DUPLEX_HALF;
5030 		if (bmcr & BMCR_ANENABLE) {
/* With AN on, duplex comes from the common local/partner ability. */
5033 			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5034 			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5035 			common = local_adv & remote_adv;
5036 			if (common & (ADVERTISE_1000XHALF |
5037 				      ADVERTISE_1000XFULL)) {
5038 				if (common & ADVERTISE_1000XFULL)
5039 					current_duplex = DUPLEX_FULL;
5041 					current_duplex = DUPLEX_HALF;
5042 			} else if (!tg3_flag(tp, 5780_CLASS)) {
5043 				/* Link is up via parallel detect */
5045 				current_link_up = 0;
5050 	if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
5051 		tg3_setup_flow_control(tp, local_adv, remote_adv);
5053 	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5054 	if (tp->link_config.active_duplex == DUPLEX_HALF)
5055 		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5057 	tw32_f(MAC_MODE, tp->mac_mode);
5060 	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5062 	tp->link_config.active_speed = current_speed;
5063 	tp->link_config.active_duplex = current_duplex;
/* Propagate carrier state and report the link. */
5065 	if (current_link_up != netif_carrier_ok(tp->dev)) {
5066 		if (current_link_up)
5067 			netif_carrier_on(tp->dev);
5069 			netif_carrier_off(tp->dev);
5070 			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5072 		tg3_link_report(tp);
/* Periodic parallel-detection helper for MII-style SerDes links.
 *
 * While autoneg is pending (serdes_counter > 0) it just counts down.
 * After the timeout: if the link never came up, look for a signal with
 * no config code words and force 1000FD (parallel detect); if the link
 * came up via parallel detect but config words later appear, re-enable
 * autoneg.
 */
5077 static void tg3_serdes_parallel_detect(struct tg3 *tp)
5079 	if (tp->serdes_counter) {
5080 		/* Give autoneg time to complete. */
5081 		tp->serdes_counter--;
5085 	if (!netif_carrier_ok(tp->dev) &&
5086 	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
5089 		tg3_readphy(tp, MII_BMCR, &bmcr);
5090 		if (bmcr & BMCR_ANENABLE) {
5093 			/* Select shadow register 0x1f */
5094 			tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
5095 			tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
5097 			/* Select expansion interrupt status register */
5098 			tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5099 				     MII_TG3_DSP_EXP1_INT_STAT);
/* Double read: first read returns latched status. */
5100 			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5101 			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5103 			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
5104 				/* We have signal detect and not receiving
5105 				 * config code words, link is up by parallel
/* Force 1000 Mb/s full duplex and flag parallel-detect mode. */
5109 				bmcr &= ~BMCR_ANENABLE;
5110 				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5111 				tg3_writephy(tp, MII_BMCR, bmcr);
5112 				tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
5115 	} else if (netif_carrier_ok(tp->dev) &&
5116 		   (tp->link_config.autoneg == AUTONEG_ENABLE) &&
5117 		   (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5120 		/* Select expansion interrupt status register */
5121 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5122 			     MII_TG3_DSP_EXP1_INT_STAT);
5123 		tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5127 			/* Config code words received, turn on autoneg. */
5128 			tg3_readphy(tp, MII_BMCR, &bmcr);
5129 			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
5131 			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
/* Dispatch link setup to the correct PHY-type handler, then apply
 * chip-wide follow-up programming that depends on the resulting link:
 * 5784 AX clock prescaler, TX inter-packet gap/slot time, statistics
 * coalescing ticks, and the ASPM L1-threshold workaround.
 * Returns the handler's error code.
 */
5137 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
5142 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
5143 		err = tg3_setup_fiber_phy(tp, force_reset);
5144 	else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
5145 		err = tg3_setup_fiber_mii_phy(tp, force_reset);
5147 		err = tg3_setup_copper_phy(tp, force_reset);
/* 5784 AX: re-derive the GRC timer prescaler from the MAC clock rate. */
5149 	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
5152 		val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
5153 		if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
5155 		else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
5160 		val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
5161 		val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
5162 		tw32(GRC_MISC_CFG, val);
/* TX lengths: base IPG/CRS values, preserving 5720 jumbo/count-down
 * fields; half-duplex gigabit needs the larger 0xff slot time. */
5165 	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5166 	      (6 << TX_LENGTHS_IPG_SHIFT);
5167 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
5168 		val |= tr32(MAC_TX_LENGTHS) &
5169 		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
5170 			TX_LENGTHS_CNT_DWN_VAL_MSK);
5172 	if (tp->link_config.active_speed == SPEED_1000 &&
5173 	    tp->link_config.active_duplex == DUPLEX_HALF)
5174 		tw32(MAC_TX_LENGTHS, val |
5175 		     (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
5177 		tw32(MAC_TX_LENGTHS, val |
5178 		     (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
/* Stats coalescing only runs while carrier is up (pre-5705 chips). */
5180 	if (!tg3_flag(tp, 5705_PLUS)) {
5181 		if (netif_carrier_ok(tp->dev)) {
5182 			tw32(HOSTCC_STAT_COAL_TICKS,
5183 			     tp->coal.stats_block_coalesce_usecs);
5185 			tw32(HOSTCC_STAT_COAL_TICKS, 0);
/* ASPM workaround: relax the PCIe L1 entry threshold while link is down. */
5189 	if (tg3_flag(tp, ASPM_WORKAROUND)) {
5190 		val = tr32(PCIE_PWR_MGMT_THRESH);
5191 		if (!netif_carrier_ok(tp->dev))
5192 			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
5195 			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
5196 		tw32(PCIE_PWR_MGMT_THRESH, val);
/* Nonzero while interrupts are being synchronized/disabled; polled by
 * paths that must not race the interrupt handler. */
5202 static inline int tg3_irq_sync(struct tg3 *tp)
5204 	return tp->irq_sync;
/* Copy @len bytes of chip registers starting at register offset @off into
 * the dump buffer, mirroring the register layout: the destination pointer
 * is advanced by @off as well, so each register lands at dst[off/4 + i].
 */
5207 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
/* Offset dst to match the register offset (byte arithmetic via u8 *). */
5211 	dst = (u32 *)((u8 *)dst + off);
5212 	for (i = 0; i < len; i += sizeof(u32))
5213 		*dst++ = tr32(off + i);
/* Snapshot the legacy (non-PCIe) register blocks into @regs for a debug
 * dump.  Each tg3_rd32_loop() call covers one functional block (mailboxes,
 * MAC, send/receive data/BD engines, DMA, CPUs, GRC, NVRAM); lengths are
 * the byte sizes of those register windows.
 */
5216 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
5218 	tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
5219 	tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
5220 	tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
5221 	tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
5222 	tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
5223 	tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
5224 	tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
5225 	tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
5226 	tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
5227 	tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
5228 	tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
5229 	tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
5230 	tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
5231 	tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
5232 	tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
5233 	tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
5234 	tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
5235 	tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
5236 	tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
/* Per-vector coalescing registers exist only with MSI-X support. */
5238 	if (tg3_flag(tp, SUPPORT_MSIX))
5239 		tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
5241 	tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
5242 	tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
5243 	tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
5244 	tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
5245 	tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
5246 	tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
5247 	tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
5248 	tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
/* Older chips have a separate TX CPU. */
5250 	if (!tg3_flag(tp, 5705_PLUS)) {
5251 		tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
5252 		tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
5253 		tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
5256 	tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
5257 	tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
5258 	tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
5259 	tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
5260 	tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
5262 	if (tg3_flag(tp, NVRAM))
5263 		tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
/* Emit a diagnostic dump of chip registers and per-vector status/NAPI
 * state to the kernel log.  Allocation is GFP_ATOMIC because this can be
 * called from error paths in non-sleepable context; on allocation failure
 * the register dump is skipped (the per-queue dump below is elided from
 * this view — NOTE(review): confirm it still runs in that case).
 */
5266 static void tg3_dump_state(struct tg3 *tp)
5271 	regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
5273 		netdev_err(tp->dev, "Failed allocating register dump buffer\n");
5277 	if (tg3_flag(tp, PCI_EXPRESS)) {
5278 		/* Read up to but not including private PCI registers */
5279 		for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
5280 			regs[i / sizeof(u32)] = tr32(i);
5282 		tg3_dump_legacy_regs(tp, regs);
/* Print four registers per line, skipping all-zero groups. */
5284 	for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
5285 		if (!regs[i + 0] && !regs[i + 1] &&
5286 		    !regs[i + 2] && !regs[i + 3])
5289 		netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
5291 			   regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
/* Per-interrupt-vector hardware status block and NAPI ring indices. */
5296 	for (i = 0; i < tp->irq_cnt; i++) {
5297 		struct tg3_napi *tnapi = &tp->napi[i];
5299 		/* SW status block */
5301 			   "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
5303 			   tnapi->hw_status->status,
5304 			   tnapi->hw_status->status_tag,
5305 			   tnapi->hw_status->rx_jumbo_consumer,
5306 			   tnapi->hw_status->rx_consumer,
5307 			   tnapi->hw_status->rx_mini_consumer,
5308 			   tnapi->hw_status->idx[0].rx_producer,
5309 			   tnapi->hw_status->idx[0].tx_consumer);
5312 			   "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
5314 			   tnapi->last_tag, tnapi->last_irq_tag,
5315 			   tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
5317 			   tnapi->prodring.rx_std_prod_idx,
5318 			   tnapi->prodring.rx_std_cons_idx,
5319 			   tnapi->prodring.rx_jmb_prod_idx,
5320 			   tnapi->prodring.rx_jmb_cons_idx);
5324 /* This is called whenever we suspect that the system chipset is re-
5325 * ordering the sequence of MMIO to the tx send mailbox. The symptom
5326 * is bogus tx completions. We try to recover by setting the
5327 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
/* Recover from suspected MMIO write re-ordering to the TX mailbox.
 * Warns the user, then flags TX_RECOVERY_PENDING so the reset task can
 * switch mailbox write methods and reset the chip later.
 */
5330 static void tg3_tx_recover(struct tg3 *tp)
/* Sanity: we should not already be using the reorder workaround or the
 * indirect mailbox write path when this is invoked. */
5332 	BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
5333 	       tp->write32_tx_mbox == tg3_write_indirect_mbox);
5335 	netdev_warn(tp->dev,
5336 		    "The system may be re-ordering memory-mapped I/O "
5337 		    "cycles to the network device, attempting to recover. "
5338 		    "Please report the problem to the driver maintainer "
5339 		    "and include system chipset information.\n");
/* Flag must be set under tp->lock so it is seen consistently by the
 * reset task and the interrupt paths. */
5341 	spin_lock(&tp->lock);
5342 	tg3_flag_set(tp, TX_RECOVERY_PENDING);
5343 	spin_unlock(&tp->lock);
/* Number of free TX descriptors on this vector's ring: configured depth
 * minus the count of descriptors currently in flight (prod - cons, masked
 * to the power-of-two ring size).
 */
5346 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
5348 	/* Tell compiler to fetch tx indices from memory. */
5350 	return tnapi->tx_pending -
5351 	       ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
5354 /* Tigon3 never reports partial packet sends. So we do not
5355 * need special logic to handle SKBs that have not had all
5356 * of their frags sent yet, like SunGEM does.
/* TX completion processing for one NAPI vector: walk the ring from the
 * software consumer index up to the hardware consumer index, unmap DMA
 * buffers, free completed skbs, and wake the queue if it was stopped and
 * enough room has opened up.
 */
5358 static void tg3_tx(struct tg3_napi *tnapi)
5360 	struct tg3 *tp = tnapi->tp;
5361 	u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
5362 	u32 sw_idx = tnapi->tx_cons;
5363 	struct netdev_queue *txq;
5364 	int index = tnapi - tp->napi;
/* With TSS each vector owns its own netdev TX queue; index selection for
 * the non-TSS case is in elided lines — TODO confirm against full source. */
5366 	if (tg3_flag(tp, ENABLE_TSS))
5369 	txq = netdev_get_tx_queue(tp->dev, index);
5371 	while (sw_idx != hw_idx) {
5372 		struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
5373 		struct sk_buff *skb = ri->skb;
/* A NULL skb at a completed slot indicates ring corruption. */
5376 		if (unlikely(skb == NULL)) {
5381 		pci_unmap_single(tp->pdev,
5382 				 dma_unmap_addr(ri, mapping),
/* Skip over workaround sub-fragments belonging to this head descriptor. */
5388 		while (ri->fragmented) {
5389 			ri->fragmented = false;
5390 			sw_idx = NEXT_TX(sw_idx);
5391 			ri = &tnapi->tx_buffers[sw_idx];
5394 		sw_idx = NEXT_TX(sw_idx);
/* Unmap each page fragment of the skb; reaching hw_idx mid-skb or finding
 * an occupied slot means the hardware lied about completion. */
5396 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
5397 			ri = &tnapi->tx_buffers[sw_idx];
5398 			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
5401 			pci_unmap_page(tp->pdev,
5402 				       dma_unmap_addr(ri, mapping),
5403 				       skb_frag_size(&skb_shinfo(skb)->frags[i]),
5406 			while (ri->fragmented) {
5407 				ri->fragmented = false;
5408 				sw_idx = NEXT_TX(sw_idx);
5409 				ri = &tnapi->tx_buffers[sw_idx];
5412 			sw_idx = NEXT_TX(sw_idx);
5417 		if (unlikely(tx_bug)) {
5423 	tnapi->tx_cons = sw_idx;
5425 	/* Need to make the tx_cons update visible to tg3_start_xmit()
5426 	 * before checking for netif_queue_stopped(). Without the
5427 	 * memory barrier, there is a small possibility that tg3_start_xmit()
5428 	 * will miss it and cause the queue to be stopped forever.
/* Re-check under the TX queue lock to avoid a wake/stop race with
 * tg3_start_xmit(). */
5432 	if (unlikely(netif_tx_queue_stopped(txq) &&
5433 		     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
5434 		__netif_tx_lock(txq, smp_processor_id());
5435 		if (netif_tx_queue_stopped(txq) &&
5436 		    (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
5437 			netif_tx_wake_queue(txq);
5438 		__netif_tx_unlock(txq);
/* Unmap and free one RX ring buffer.  map_sz is the size originally used
 * for the DMA mapping (standard vs jumbo).
 */
5442 static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
5447 	pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
5448 			 map_sz, PCI_DMA_FROMDEVICE);
5449 	dev_kfree_skb_any(ri->skb);
5453 /* Returns size of skb allocated or < 0 on error.
5455 * We only need to fill in the address because the other members
5456 * of the RX descriptor are invariant, see tg3_init_rings.
5458 * Note the purposeful assymetry of cpu vs. chip accesses. For
5459 * posting buffers we only dirty the first cache line of the RX
5460 * descriptor (containing the address). Whereas for the RX status
5461 * buffers the cpu only reads the last cacheline of the RX descriptor
5462 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
/* Allocate and DMA-map a fresh RX buffer for the given producer ring slot.
 * Returns the allocated buffer size, or a negative errno on failure (per
 * the comment block above).  On failure nothing in the ring slot is
 * modified — callers rely on that.
 */
5464 static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
5465 			    u32 opaque_key, u32 dest_idx_unmasked)
5467 	struct tg3_rx_buffer_desc *desc;
5468 	struct ring_info *map;
5469 	struct sk_buff *skb;
5471 	int skb_size, dest_idx;
/* The opaque key selects which producer ring (standard or jumbo) the
 * slot belongs to; it also determines the buffer/mapping size. */
5473 	switch (opaque_key) {
5474 	case RXD_OPAQUE_RING_STD:
5475 		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
5476 		desc = &tpr->rx_std[dest_idx];
5477 		map = &tpr->rx_std_buffers[dest_idx];
5478 		skb_size = tp->rx_pkt_map_sz;
5481 	case RXD_OPAQUE_RING_JUMBO:
5482 		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
5483 		desc = &tpr->rx_jmb[dest_idx].std;
5484 		map = &tpr->rx_jmb_buffers[dest_idx];
5485 		skb_size = TG3_RX_JMB_MAP_SZ;
5492 	/* Do not overwrite any of the map or rp information
5493 	 * until we are sure we can commit to a new buffer.
5495 	 * Callers depend upon this behavior and assume that
5496 	 * we leave everything unchanged if we fail.
5498 	skb = netdev_alloc_skb(tp->dev, skb_size + TG3_RX_OFFSET(tp));
/* Reserve headroom so the IP header lands at the chip/stack-preferred
 * alignment. */
5502 	skb_reserve(skb, TG3_RX_OFFSET(tp));
5504 	mapping = pci_map_single(tp->pdev, skb->data, skb_size,
5505 				 PCI_DMA_FROMDEVICE);
5506 	if (pci_dma_mapping_error(tp->pdev, mapping)) {
/* Commit: record the mapping and publish the bus address to the BD. */
5512 	dma_unmap_addr_set(map, mapping, mapping);
5514 	desc->addr_hi = ((u64)mapping >> 32);
5515 	desc->addr_lo = ((u64)mapping & 0xffffffff);
5520 /* We only need to move over in the address because the other
5521 * members of the RX descriptor are invariant. See notes above
5522 * tg3_alloc_rx_skb for full details.
/* Recycle an RX buffer: move the skb, DMA mapping and bus address from a
 * source slot (always on napi[0]'s producer set) to a destination slot,
 * instead of allocating a new buffer.  Used when a packet is dropped or
 * copied, so the original buffer can be reposted to the chip.
 */
5524 static void tg3_recycle_rx(struct tg3_napi *tnapi,
5525 			   struct tg3_rx_prodring_set *dpr,
5526 			   u32 opaque_key, int src_idx,
5527 			   u32 dest_idx_unmasked)
5529 	struct tg3 *tp = tnapi->tp;
5530 	struct tg3_rx_buffer_desc *src_desc, *dest_desc;
5531 	struct ring_info *src_map, *dest_map;
/* Source slots always come from the first vector's producer rings. */
5532 	struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
5535 	switch (opaque_key) {
5536 	case RXD_OPAQUE_RING_STD:
5537 		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
5538 		dest_desc = &dpr->rx_std[dest_idx];
5539 		dest_map = &dpr->rx_std_buffers[dest_idx];
5540 		src_desc = &spr->rx_std[src_idx];
5541 		src_map = &spr->rx_std_buffers[src_idx];
5544 	case RXD_OPAQUE_RING_JUMBO:
5545 		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
5546 		dest_desc = &dpr->rx_jmb[dest_idx].std;
5547 		dest_map = &dpr->rx_jmb_buffers[dest_idx];
5548 		src_desc = &spr->rx_jmb[src_idx].std;
5549 		src_map = &spr->rx_jmb_buffers[src_idx];
/* Transfer ownership: skb pointer, unmap cookie, then the BD address. */
5556 	dest_map->skb = src_map->skb;
5557 	dma_unmap_addr_set(dest_map, mapping,
5558 			   dma_unmap_addr(src_map, mapping));
5559 	dest_desc->addr_hi = src_desc->addr_hi;
5560 	dest_desc->addr_lo = src_desc->addr_lo;
5562 	/* Ensure that the update to the skb happens after the physical
5563 	 * addresses have been transferred to the new BD location.
5567 	src_map->skb = NULL;
5570 /* The RX ring scheme is composed of multiple rings which post fresh
5571 * buffers to the chip, and one special ring the chip uses to report
5572 * status back to the host.
5574 * The special ring reports the status of received packets to the
5575 * host. The chip does not write into the original descriptor the
5576 * RX buffer was obtained from. The chip simply takes the original
5577 * descriptor as provided by the host, updates the status and length
5578 * field, then writes this into the next status ring entry.
5580 * Each ring the host uses to post buffers to the chip is described
5581 * by a TG3_BDINFO entry in the chips SRAM area. When a packet arrives,
5582 * it is first placed into the on-chip ram. When the packet's length
5583 * is known, it walks down the TG3_BDINFO entries to select the ring.
5584 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
5585 * which is within the range of the new packet's length is chosen.
5587 * The "separate ring for rx status" scheme may sound queer, but it makes
5588 * sense from a cache coherency perspective. If only the host writes
5589 * to the buffer post rings, and only the chip writes to the rx status
5590 * rings, then cache lines never move beyond shared-modified state.
5591 * If both the host and chip were to write into the same ring, cache line
5592 * eviction could occur since both entities want it in an exclusive state.
/* RX processing for one NAPI vector, bounded by the NAPI budget.
 * Walks the return ring from the software pointer to the hardware
 * producer index, delivering packets to the stack and reposting or
 * recycling buffers.  Returns the number of packets processed
 * (per the NAPI contract — tail of function is elided in this excerpt).
 */
5594 static int tg3_rx(struct tg3_napi *tnapi, int budget)
5596 	struct tg3 *tp = tnapi->tp;
5597 	u32 work_mask, rx_std_posted = 0;
5598 	u32 std_prod_idx, jmb_prod_idx;
5599 	u32 sw_idx = tnapi->rx_rcb_ptr;
5602 	struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
5604 	hw_idx = *(tnapi->rx_rcb_prod_idx);
5606 	 * We need to order the read of hw_idx and the read of
5607 	 * the opaque cookie.
/* Work on local copies of the producer indices; they are written back to
 * the chip in one shot after the loop. */
5612 	std_prod_idx = tpr->rx_std_prod_idx;
5613 	jmb_prod_idx = tpr->rx_jmb_prod_idx;
5614 	while (sw_idx != hw_idx && budget > 0) {
5615 		struct ring_info *ri;
5616 		struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
5618 		struct sk_buff *skb;
5619 		dma_addr_t dma_addr;
5620 		u32 opaque_key, desc_idx, *post_ptr;
/* The opaque cookie identifies which producer ring the buffer came from
 * and its index within that ring. */
5622 		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
5623 		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
5624 		if (opaque_key == RXD_OPAQUE_RING_STD) {
5625 			ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
5626 			dma_addr = dma_unmap_addr(ri, mapping);
5628 			post_ptr = &std_prod_idx;
5630 		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
5631 			ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
5632 			dma_addr = dma_unmap_addr(ri, mapping);
5634 			post_ptr = &jmb_prod_idx;
5636 			goto next_pkt_nopost;
5638 		work_mask |= opaque_key;
/* Hardware-reported errors: recycle the buffer and drop the packet. */
5640 		if (desc->err_vlan & RXD_ERR_MASK) {
5642 			tg3_recycle_rx(tnapi, tpr, opaque_key,
5643 				       desc_idx, *post_ptr);
5645 			/* Other statistics kept track of by card. */
5650 		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
/* Large packets: hand the mapped buffer straight up and allocate a
 * replacement; small packets are copied below so the original buffer
 * can be recycled. */
5653 		if (len > TG3_RX_COPY_THRESH(tp)) {
5656 			skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key,
5661 			pci_unmap_single(tp->pdev, dma_addr, skb_size,
5662 					 PCI_DMA_FROMDEVICE);
5664 			/* Ensure that the update to the skb happens
5665 			 * after the usage of the old DMA mapping.
5673 			struct sk_buff *copy_skb;
5675 			tg3_recycle_rx(tnapi, tpr, opaque_key,
5676 				       desc_idx, *post_ptr);
5678 			copy_skb = netdev_alloc_skb(tp->dev, len +
5680 			if (copy_skb == NULL)
5681 				goto drop_it_no_recycle;
5683 			skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
5684 			skb_put(copy_skb, len);
/* Sync for CPU before the copy, back to the device after, since the
 * original mapping stays live for reuse. */
5685 			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
5686 			skb_copy_from_linear_data(skb, copy_skb->data, len);
5687 			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
5689 			/* We'll reuse the original ring buffer. */
/* Checksum offload: a full 0xffff TCP/UDP pseudo-checksum from the chip
 * means the packet verified clean. */
5693 		if ((tp->dev->features & NETIF_F_RXCSUM) &&
5694 		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
5695 		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
5696 		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
5697 			skb->ip_summed = CHECKSUM_UNNECESSARY;
5699 			skb_checksum_none_assert(skb);
5701 		skb->protocol = eth_type_trans(skb, tp->dev);
/* Oversized non-VLAN frames are dropped. */
5703 		if (len > (tp->dev->mtu + ETH_HLEN) &&
5704 		    skb->protocol != htons(ETH_P_8021Q)) {
5706 			goto drop_it_no_recycle;
5709 		if (desc->type_flags & RXD_FLAG_VLAN &&
5710 		    !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
5711 			__vlan_hwaccel_put_tag(skb,
5712 					       desc->err_vlan & RXD_VLAN_MASK);
5714 		napi_gro_receive(&tnapi->napi, skb);
/* Periodically flush the standard producer index so the chip does not
 * starve for buffers during long polls. */
5722 		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
5723 			tpr->rx_std_prod_idx = std_prod_idx &
5724 					       tp->rx_std_ring_mask;
5725 			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5726 				     tpr->rx_std_prod_idx);
5727 			work_mask &= ~RXD_OPAQUE_RING_STD;
5732 		sw_idx &= tp->rx_ret_ring_mask;
5734 		/* Refresh hw_idx to see if there is new work */
5735 		if (sw_idx == hw_idx) {
5736 			hw_idx = *(tnapi->rx_rcb_prod_idx);
5741 	/* ACK the status ring. */
5742 	tnapi->rx_rcb_ptr = sw_idx;
5743 	tw32_rx_mbox(tnapi->consmbox, sw_idx);
5745 	/* Refill RX ring(s). */
5746 	if (!tg3_flag(tp, ENABLE_RSS)) {
5747 		if (work_mask & RXD_OPAQUE_RING_STD) {
5748 			tpr->rx_std_prod_idx = std_prod_idx &
5749 					       tp->rx_std_ring_mask;
5750 			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5751 				     tpr->rx_std_prod_idx);
5753 		if (work_mask & RXD_OPAQUE_RING_JUMBO) {
5754 			tpr->rx_jmb_prod_idx = jmb_prod_idx &
5755 					       tp->rx_jmb_ring_mask;
5756 			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5757 				     tpr->rx_jmb_prod_idx);
/* RSS: napi[1] owns the refill; publish local indices and kick it. */
5760 	} else if (work_mask) {
5761 		/* rx_std_buffers[] and rx_jmb_buffers[] entries must be
5762 		 * updated before the producer indices can be updated.
5766 		tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
5767 		tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
5769 		if (tnapi != &tp->napi[1])
5770 			napi_schedule(&tp->napi[1].napi);
/* Poll-time link handling: if link-change reporting goes through the
 * status block (neither USE_LINKCHG_REG nor POLL_SERDES is set), detect a
 * link-change event, clear it, and rerun PHY setup under tp->lock.
 */
5776 static void tg3_poll_link(struct tg3 *tp)
5778 	/* handle link change and other phy events */
5779 	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
5780 		struct tg3_hw_status *sblk = tp->napi[0].hw_status;
5782 		if (sblk->status & SD_STATUS_LINK_CHG) {
/* Acknowledge the event while keeping the rest of the status word. */
5783 			sblk->status = SD_STATUS_UPDATED |
5784 				       (sblk->status & ~SD_STATUS_LINK_CHG);
5785 			spin_lock(&tp->lock);
5786 			if (tg3_flag(tp, USE_PHYLIB)) {
5788 				     (MAC_STATUS_SYNC_CHANGED |
5789 				      MAC_STATUS_CFG_CHANGED |
5790 				      MAC_STATUS_MI_COMPLETION |
5791 				      MAC_STATUS_LNKSTATE_CHANGED));
5794 				tg3_setup_phy(tp, 0);
5795 			spin_unlock(&tp->lock);
/* Transfer freshly-recycled RX buffers from a source producer-ring set
 * (per-vector) to the destination set (napi[0]'s, which the chip actually
 * consumes).  Handles both the standard and jumbo rings, copying ring_info
 * entries and BD bus addresses, then advancing consumer/producer indices
 * modulo the ring masks.  Return value handling is in elided lines —
 * TODO confirm against full source.
 */
5800 static int tg3_rx_prodring_xfer(struct tg3 *tp,
5801 				struct tg3_rx_prodring_set *dpr,
5802 				struct tg3_rx_prodring_set *spr)
5804 	u32 si, di, cpycnt, src_prod_idx;
5808 		src_prod_idx = spr->rx_std_prod_idx;
5810 		/* Make sure updates to the rx_std_buffers[] entries and the
5811 		 * standard producer index are seen in the correct order.
5815 		if (spr->rx_std_cons_idx == src_prod_idx)
/* Copy either up to the producer or up to the ring wrap, whichever is
 * closer, and never past the destination ring's capacity. */
5818 		if (spr->rx_std_cons_idx < src_prod_idx)
5819 			cpycnt = src_prod_idx - spr->rx_std_cons_idx;
5821 			cpycnt = tp->rx_std_ring_mask + 1 -
5822 				 spr->rx_std_cons_idx;
5824 		cpycnt = min(cpycnt,
5825 			     tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
5827 		si = spr->rx_std_cons_idx;
5828 		di = dpr->rx_std_prod_idx;
/* Destination slots must be empty; an occupied slot shortens the copy. */
5830 		for (i = di; i < di + cpycnt; i++) {
5831 			if (dpr->rx_std_buffers[i].skb) {
5841 		/* Ensure that updates to the rx_std_buffers ring and the
5842 		 * shadowed hardware producer ring from tg3_recycle_skb() are
5843 		 * ordered correctly WRT the skb check above.
5847 		memcpy(&dpr->rx_std_buffers[di],
5848 		       &spr->rx_std_buffers[si],
5849 		       cpycnt * sizeof(struct ring_info));
5851 		for (i = 0; i < cpycnt; i++, di++, si++) {
5852 			struct tg3_rx_buffer_desc *sbd, *dbd;
5853 			sbd = &spr->rx_std[si];
5854 			dbd = &dpr->rx_std[di];
5855 			dbd->addr_hi = sbd->addr_hi;
5856 			dbd->addr_lo = sbd->addr_lo;
5859 		spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
5860 				       tp->rx_std_ring_mask;
5861 		dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
5862 				       tp->rx_std_ring_mask;
/* Same procedure for the jumbo ring. */
5866 		src_prod_idx = spr->rx_jmb_prod_idx;
5868 		/* Make sure updates to the rx_jmb_buffers[] entries and
5869 		 * the jumbo producer index are seen in the correct order.
5873 		if (spr->rx_jmb_cons_idx == src_prod_idx)
5876 		if (spr->rx_jmb_cons_idx < src_prod_idx)
5877 			cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
5879 			cpycnt = tp->rx_jmb_ring_mask + 1 -
5880 				 spr->rx_jmb_cons_idx;
5882 		cpycnt = min(cpycnt,
5883 			     tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
5885 		si = spr->rx_jmb_cons_idx;
5886 		di = dpr->rx_jmb_prod_idx;
5888 		for (i = di; i < di + cpycnt; i++) {
5889 			if (dpr->rx_jmb_buffers[i].skb) {
5899 		/* Ensure that updates to the rx_jmb_buffers ring and the
5900 		 * shadowed hardware producer ring from tg3_recycle_skb() are
5901 		 * ordered correctly WRT the skb check above.
5905 		memcpy(&dpr->rx_jmb_buffers[di],
5906 		       &spr->rx_jmb_buffers[si],
5907 		       cpycnt * sizeof(struct ring_info));
5909 		for (i = 0; i < cpycnt; i++, di++, si++) {
5910 			struct tg3_rx_buffer_desc *sbd, *dbd;
5911 			sbd = &spr->rx_jmb[si].std;
5912 			dbd = &dpr->rx_jmb[di].std;
5913 			dbd->addr_hi = sbd->addr_hi;
5914 			dbd->addr_lo = sbd->addr_lo;
5917 		spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
5918 				       tp->rx_jmb_ring_mask;
5919 		dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
5920 				       tp->rx_jmb_ring_mask;
/* Core per-vector poll work: run TX completion, then RX within the NAPI
 * budget; on the RSS master vector (napi[1]) also pull recycled buffers
 * from all other vectors back into napi[0]'s producer rings and publish
 * any changed producer indices to the chip.  Returns updated work_done.
 */
5926 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
5928 	struct tg3 *tp = tnapi->tp;
5930 	/* run TX completion thread */
5931 	if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
/* TX_RECOVERY_PENDING means tg3_tx() hit a mailbox-reorder bug; bail so
 * the caller can trigger a reset. */
5933 		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
/* Vector has no RX return ring — nothing further to do. */
5937 	if (!tnapi->rx_rcb_prod_idx)
5940 	/* run RX thread, within the bounds set by NAPI.
5941 	 * All RX "locking" is done by ensuring outside
5942 	 * code synchronizes with tg3->napi.poll()
5944 	if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
5945 		work_done += tg3_rx(tnapi, budget - work_done);
5947 	if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
5948 		struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
5950 		u32 std_prod_idx = dpr->rx_std_prod_idx;
5951 		u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
5953 		for (i = 1; i < tp->irq_cnt; i++)
5954 			err |= tg3_rx_prodring_xfer(tp, dpr,
5955 						    &tp->napi[i].prodring);
/* Only touch the mailboxes if the transfer actually moved buffers. */
5959 		if (std_prod_idx != dpr->rx_std_prod_idx)
5960 			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5961 				     dpr->rx_std_prod_idx);
5963 		if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
5964 			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5965 				     dpr->rx_jmb_prod_idx);
5970 			tw32_f(HOSTCC_MODE, tp->coal_now);
/* Schedule the chip-reset workqueue task exactly once: the atomic
 * test-and-set of RESET_TASK_PENDING guards against double scheduling.
 */
5976 static inline void tg3_reset_task_schedule(struct tg3 *tp)
5978 	if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
5979 		schedule_work(&tp->reset_task);
/* Cancel any queued/running reset task (waiting for it to finish) and
 * clear the pending flag so a new reset may be scheduled later.
 */
5982 static inline void tg3_reset_task_cancel(struct tg3 *tp)
5984 	cancel_work_sync(&tp->reset_task);
5985 	tg3_flag_clear(tp, RESET_TASK_PENDING);
/* NAPI poll handler for MSI-X vectors (tagged-status mode only).
 * Loops doing work until either the budget is exhausted or no new work is
 * indicated, then completes NAPI and re-enables the vector's interrupt by
 * writing the processed tag to the interrupt mailbox.
 */
5988 static int tg3_poll_msix(struct napi_struct *napi, int budget)
5990 	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
5991 	struct tg3 *tp = tnapi->tp;
5993 	struct tg3_hw_status *sblk = tnapi->hw_status;
5996 		work_done = tg3_poll_work(tnapi, work_done, budget);
/* A pending TX recovery aborts polling; the reset task takes over. */
5998 		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6001 		if (unlikely(work_done >= budget))
6004 		/* tp->last_tag is used in tg3_int_reenable() below
6005 		 * to tell the hw how much work has been processed,
6006 		 * so we must read it before checking for more work.
6008 		tnapi->last_tag = sblk->status_tag;
6009 		tnapi->last_irq_tag = tnapi->last_tag;
6012 		/* check for RX/TX work to do */
6013 		if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
6014 			   *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
6015 			napi_complete(napi);
6016 			/* Reenable interrupts. */
/* The tag in bits 31:24 tells the chip how far we've processed. */
6017 			tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
6026 	/* work_done is guaranteed to be less than budget. */
6027 	napi_complete(napi);
6028 	tg3_reset_task_schedule(tp);
/* Inspect chip error-status registers when the status block flags an
 * error; log the cause and schedule a chip reset.  ERROR_PROCESSED
 * prevents handling the same error more than once.
 */
6032 static void tg3_process_error(struct tg3 *tp)
6035 	bool real_error = false;
6037 	if (tg3_flag(tp, ERROR_PROCESSED))
6040 	/* Check Flow Attention register */
6041 	val = tr32(HOSTCC_FLOW_ATTN);
/* MBUF low-watermark attention alone is not treated as a real error. */
6042 	if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
6043 		netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
6047 	if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
6048 		netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
6052 	if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
6053 		netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
6062 	tg3_flag_set(tp, ERROR_PROCESSED);
6063 	tg3_reset_task_schedule(tp);
/* NAPI poll handler for the default (single / INTx / MSI) vector.
 * Handles error status, loops on tg3_poll_work() within the budget, and
 * supports both tagged and non-tagged status-block modes when deciding
 * how to acknowledge completed work before re-enabling interrupts.
 */
6066 static int tg3_poll(struct napi_struct *napi, int budget)
6068 	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6069 	struct tg3 *tp = tnapi->tp;
6071 	struct tg3_hw_status *sblk = tnapi->hw_status;
6074 		if (sblk->status & SD_STATUS_ERROR)
6075 			tg3_process_error(tp);
6079 		work_done = tg3_poll_work(tnapi, work_done, budget);
6081 		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6084 		if (unlikely(work_done >= budget))
6087 		if (tg3_flag(tp, TAGGED_STATUS)) {
6088 			/* tp->last_tag is used in tg3_int_reenable() below
6089 			 * to tell the hw how much work has been processed,
6090 			 * so we must read it before checking for more work.
6092 			tnapi->last_tag = sblk->status_tag;
6093 			tnapi->last_irq_tag = tnapi->last_tag;
/* Non-tagged mode: acknowledge by clearing the UPDATED bit instead. */
6096 			sblk->status &= ~SD_STATUS_UPDATED;
6098 		if (likely(!tg3_has_work(tnapi))) {
6099 			napi_complete(napi);
6100 			tg3_int_reenable(tnapi);
6108 	/* work_done is guaranteed to be less than budget. */
6109 	napi_complete(napi);
6110 	tg3_reset_task_schedule(tp);
/* Disable all NAPI contexts, highest vector first (reverse of enable). */
6114 static void tg3_napi_disable(struct tg3 *tp)
6118 	for (i = tp->irq_cnt - 1; i >= 0; i--)
6119 		napi_disable(&tp->napi[i].napi);
/* Enable all NAPI contexts in ascending vector order. */
6122 static void tg3_napi_enable(struct tg3 *tp)
6126 	for (i = 0; i < tp->irq_cnt; i++)
6127 		napi_enable(&tp->napi[i].napi);
/* Register NAPI contexts: vector 0 uses the general tg3_poll handler,
 * all additional (MSI-X) vectors use tg3_poll_msix.  64 is the NAPI
 * weight (budget per poll).
 */
6130 static void tg3_napi_init(struct tg3 *tp)
6134 	netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
6135 	for (i = 1; i < tp->irq_cnt; i++)
6136 		netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
/* Unregister every NAPI context added by tg3_napi_init(). */
6139 static void tg3_napi_fini(struct tg3 *tp)
6143 	for (i = 0; i < tp->irq_cnt; i++)
6144 		netif_napi_del(&tp->napi[i].napi);
/* Stop all traffic processing: refresh trans_start so the watchdog does
 * not fire during the quiesce, then disable NAPI and the TX queues.
 */
6147 static inline void tg3_netif_stop(struct tg3 *tp)
6149 	tp->dev->trans_start = jiffies;	/* prevent tx timeout */
6150 	tg3_napi_disable(tp);
6151 	netif_tx_disable(tp->dev);
/* Resume traffic processing after a quiesce: wake TX queues, re-enable
 * NAPI, force an initial status-block update and unmask interrupts.
 */
6154 static inline void tg3_netif_start(struct tg3 *tp)
6156 	/* NOTE: unconditional netif_tx_wake_all_queues is only
6157 	 * appropriate so long as all callers are assured to
6158 	 * have free tx slots (such as after tg3_init_hw)
6160 	netif_tx_wake_all_queues(tp->dev);
6162 	tg3_napi_enable(tp);
/* Setting SD_STATUS_UPDATED makes the first poll see pending work. */
6163 	tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
6164 	tg3_enable_ints(tp);
/* Quiesce interrupt handling: after marking the device irq-synced
 * (elided in this excerpt), wait for any in-flight handler on every
 * vector to finish.
 */
6167 static void tg3_irq_quiesce(struct tg3 *tp)
6171 	BUG_ON(tp->irq_sync);
6176 	for (i = 0; i < tp->irq_cnt; i++)
6177 		synchronize_irq(tp->napi[i].irq_vec);
6180 /* Fully shutdown all tg3 driver activity elsewhere in the system.
6181 * If irq_sync is non-zero, then the IRQ handler must be synchronized
6182 * with as well. Most of the time, this is not necessary except when
6183 * shutting down the device.
/* Take the driver-wide lock (bottom halves disabled); when irq_sync is
 * non-zero, additionally quiesce the IRQ handlers (see comment above).
 */
6185 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
6187 	spin_lock_bh(&tp->lock);
6189 		tg3_irq_quiesce(tp);
/* Release the driver-wide lock taken by tg3_full_lock(). */
6192 static inline void tg3_full_unlock(struct tg3 *tp)
6194 	spin_unlock_bh(&tp->lock);
6197 /* One-shot MSI handler - Chip automatically disables interrupt
6198 * after sending MSI so driver doesn't have to do it.
/* One-shot MSI handler: the chip masks the interrupt itself, so we only
 * prefetch the status block / return ring and schedule NAPI (unless an
 * irq quiesce is in progress).
 */
6200 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
6202 	struct tg3_napi *tnapi = dev_id;
6203 	struct tg3 *tp = tnapi->tp;
6205 	prefetch(tnapi->hw_status);
6207 		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6209 	if (likely(!tg3_irq_sync(tp)))
6210 		napi_schedule(&tnapi->napi);
6215 /* MSI ISR - No need to check for interrupt sharing and no need to
6216 * flush status block and interrupt mailbox. PCI ordering rules
6217 * guarantee that MSI will arrive after the status block.
/* Standard MSI handler: manually disable further chip interrupts via the
 * mailbox, then schedule NAPI.  No shared-IRQ check needed — PCI ordering
 * guarantees the status block arrives before the MSI.
 */
6219 static irqreturn_t tg3_msi(int irq, void *dev_id)
6221 	struct tg3_napi *tnapi = dev_id;
6222 	struct tg3 *tp = tnapi->tp;
6224 	prefetch(tnapi->hw_status);
6226 		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6228 	 * Writing any value to intr-mbox-0 clears PCI INTA# and
6229 	 * chip-internal interrupt pending events.
6230 	 * Writing non-zero to intr-mbox-0 additional tells the
6231 	 * NIC to stop sending us irqs, engaging "in-intr-handler"
6234 	tw32_mailbox(tnapi->int_mbox, 0x00000001);
6235 	if (likely(!tg3_irq_sync(tp)))
6236 		napi_schedule(&tnapi->napi);
6238 	return IRQ_RETVAL(1);
/* Legacy INTx handler (non-tagged status mode).  Determines whether the
 * interrupt is ours via the status block / PCI state register, masks
 * further chip interrupts through the mailbox, and schedules NAPI if
 * there is work; otherwise re-enables the interrupt for sharers.
 */
6241 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
6243 	struct tg3_napi *tnapi = dev_id;
6244 	struct tg3 *tp = tnapi->tp;
6245 	struct tg3_hw_status *sblk = tnapi->hw_status;
6246 	unsigned int handled = 1;
6248 	/* In INTx mode, it is possible for the interrupt to arrive at
6249 	 * the CPU before the status block posted prior to the interrupt.
6250 	 * Reading the PCI State register will confirm whether the
6251 	 * interrupt is ours and will flush the status block.
6253 	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
6254 		if (tg3_flag(tp, CHIP_RESETTING) ||
6255 		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6262 	 * Writing any value to intr-mbox-0 clears PCI INTA# and
6263 	 * chip-internal interrupt pending events.
6264 	 * Writing non-zero to intr-mbox-0 additional tells the
6265 	 * NIC to stop sending us irqs, engaging "in-intr-handler"
6268 	 * Flush the mailbox to de-assert the IRQ immediately to prevent
6269 	 * spurious interrupts. The flush impacts performance but
6270 	 * excessive spurious interrupts can be worse in some cases.
6272 	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
6273 	if (tg3_irq_sync(tp))
/* Acknowledge the status block before deciding whether to poll. */
6275 	sblk->status &= ~SD_STATUS_UPDATED;
6276 	if (likely(tg3_has_work(tnapi))) {
6277 		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6278 		napi_schedule(&tnapi->napi);
6280 		/* No work, shared interrupt perhaps?  re-enable
6281 		 * interrupts, and flush that PCI write
6283 		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
6287 	return IRQ_RETVAL(handled);
/* Legacy INTx handler for tagged-status mode.  Uses the status tag
 * (rather than the UPDATED bit) to detect new work, which also lets the
 * handler identify and eventually silence screaming shared interrupts.
 */
6290 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
6292 	struct tg3_napi *tnapi = dev_id;
6293 	struct tg3 *tp = tnapi->tp;
6294 	struct tg3_hw_status *sblk = tnapi->hw_status;
6295 	unsigned int handled = 1;
6297 	/* In INTx mode, it is possible for the interrupt to arrive at
6298 	 * the CPU before the status block posted prior to the interrupt.
6299 	 * Reading the PCI State register will confirm whether the
6300 	 * interrupt is ours and will flush the status block.
/* Unchanged tag: nothing new since the last handled interrupt. */
6302 	if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
6303 		if (tg3_flag(tp, CHIP_RESETTING) ||
6304 		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6311 	 * writing any value to intr-mbox-0 clears PCI INTA# and
6312 	 * chip-internal interrupt pending events.
6313 	 * writing non-zero to intr-mbox-0 additional tells the
6314 	 * NIC to stop sending us irqs, engaging "in-intr-handler"
6317 	 * Flush the mailbox to de-assert the IRQ immediately to prevent
6318 	 * spurious interrupts. The flush impacts performance but
6319 	 * excessive spurious interrupts can be worse in some cases.
6321 	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
6324 	 * In a shared interrupt configuration, sometimes other devices'
6325 	 * interrupts will scream.  We record the current status tag here
6326 	 * so that the above check can report that the screaming interrupts
6327 	 * are unhandled.  Eventually they will be silenced.
6329 	tnapi->last_irq_tag = sblk->status_tag;
6331 	if (tg3_irq_sync(tp))
6334 	prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6336 	napi_schedule(&tnapi->napi);
6339 	return IRQ_RETVAL(handled);
6342 /* ISR for interrupt test */
/* Minimal ISR used only by the interrupt self-test: if the interrupt is
 * ours (status block updated or INTA asserted), disable interrupts and
 * report handled; otherwise report unhandled.
 */
6343 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
6345 	struct tg3_napi *tnapi = dev_id;
6346 	struct tg3 *tp = tnapi->tp;
6347 	struct tg3_hw_status *sblk = tnapi->hw_status;
6349 	if ((sblk->status & SD_STATUS_UPDATED) ||
6350 	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6351 		tg3_disable_ints(tp);
6352 		return IRQ_RETVAL(1);
6354 	return IRQ_RETVAL(0);
6357 static int tg3_init_hw(struct tg3 *, int);
6358 static int tg3_halt(struct tg3 *, int, int);
6360 /* Restart hardware after configuration changes, self-test, etc.
6361 * Invoked with tp->lock held.
/* Re-initialize the hardware after a configuration change or self-test.
 * Called with tp->lock held; on init failure it halts the chip and
 * temporarily drops/re-takes the lock (hence the sparse annotations) to
 * stop the timer and re-enable NAPI safely.
 */
6363 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
6364 	__releases(tp->lock)
6365 	__acquires(tp->lock)
6369 	err = tg3_init_hw(tp, reset_phy);
6372 			   "Failed to re-initialize device, aborting\n");
6373 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6374 		tg3_full_unlock(tp);
/* del_timer_sync() may sleep/spin on the timer; must not hold the lock. */
6375 		del_timer_sync(&tp->timer);
6377 		tg3_napi_enable(tp);
6379 		tg3_full_lock(tp, 0);
6384 #ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: with interrupts unavailable, invoke every vector's
 * interrupt handler directly (skipped while an irq quiesce is active).
 */
6385 static void tg3_poll_controller(struct net_device *dev)
6388 	struct tg3 *tp = netdev_priv(dev);
6390 	if (tg3_irq_sync(tp))
6393 	for (i = 0; i < tp->irq_cnt; i++)
6394 		tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
/* Workqueue handler that performs the deferred chip reset scheduled by
 * tg3_reset_task_schedule().  Halts and re-initializes the hardware under
 * the full lock; if a TX mailbox-reorder recovery is pending, first
 * switches to the flush-based mailbox write method.
 */
6398 static void tg3_reset_task(struct work_struct *work)
6400 	struct tg3 *tp = container_of(work, struct tg3, reset_task);
6403 	tg3_full_lock(tp, 0);
/* Device went down since the reset was scheduled — nothing to do. */
6405 	if (!netif_running(tp->dev)) {
6406 		tg3_flag_clear(tp, RESET_TASK_PENDING);
6407 		tg3_full_unlock(tp);
6411 	tg3_full_unlock(tp);
/* Re-take with irq_sync=1: the actual reset requires quiesced IRQs. */
6417 	tg3_full_lock(tp, 1);
6419 	if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
/* Apply the MBOX_WRITE_REORDER workaround: flushing mailbox writes. */
6420 		tp->write32_tx_mbox = tg3_write32_tx_mbox;
6421 		tp->write32_rx_mbox = tg3_write_flush_reg32;
6422 		tg3_flag_set(tp, MBOX_WRITE_REORDER);
6423 		tg3_flag_clear(tp, TX_RECOVERY_PENDING);
6426 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
6427 	err = tg3_init_hw(tp, 1);
6431 	tg3_netif_start(tp);
6434 	tg3_full_unlock(tp);
6439 	tg3_flag_clear(tp, RESET_TASK_PENDING);
/* netdev watchdog callback: log (and, in elided lines, dump state) when
 * transmit stalls, then schedule a chip reset.
 */
6442 static void tg3_tx_timeout(struct net_device *dev)
6444 	struct tg3 *tp = netdev_priv(dev);
6446 	if (netif_msg_tx_err(tp)) {
6447 		netdev_err(dev, "transmit timed out, resetting\n");
6451 	tg3_reset_task_schedule(tp);
6454 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
/* Returns non-zero if [mapping, mapping+len] would cross a 4GB boundary
 * (hardware DMA erratum).  The "+ 8" gives headroom; the first comparison
 * is a fast pre-filter so the wrap test only runs near a boundary.
 */
6455 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
6457 	u32 base = (u32) mapping & 0xffffffff;
6459 	return (base > 0xffffdcc0) && (base + len + 8 < base);
6462 /* Test for DMA addresses > 40-bit */
/* Returns non-zero when the buffer end exceeds 40 bits of DMA address on
 * chips with the 40-bit DMA bug.  Only meaningful on 64-bit highmem
 * configs; otherwise the (elided) fallback path applies.
 */
6463 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
6466 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
6467 	if (tg3_flag(tp, 40BIT_DMA_BUG))
6468 		return ((u64) mapping + len) > DMA_BIT_MASK(40);
/* Fill one TX buffer descriptor: split the 64-bit bus address into
 * hi/lo words, pack length + flags, and pack mss + vlan tag.
 */
6475 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
6476 				 dma_addr_t mapping, u32 len, u32 flags,
6479 	txbd->addr_hi = ((u64) mapping >> 32);
6480 	txbd->addr_lo = ((u64) mapping & 0xffffffff);
6481 	txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
6482 	txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
/* Place one DMA segment on the TX ring, applying hardware workarounds:
 * the short-DMA bug (<=8 byte segments), 4GB and 40-bit boundary bugs,
 * and the 4K FIFO limit (segments longer than TG3_TX_BD_DMA_MAX are
 * split across multiple descriptors, consuming *budget).  Advances
 * *entry past the descriptors written.  Return-value semantics are in
 * elided lines — TODO confirm (true appears to signal a hwbug).
 */
6485 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
6486 			    dma_addr_t map, u32 len, u32 flags,
6489 	struct tg3 *tp = tnapi->tp;
6492 	if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
6495 	if (tg3_4g_overflow_test(map, len))
6498 	if (tg3_40bit_overflow_test(tp, map, len))
6501 	if (tg3_flag(tp, 4K_FIFO_LIMIT)) {
6502 		u32 prvidx = *entry;
/* All split pieces except the final one must not carry TXD_FLAG_END. */
6503 		u32 tmp_flag = flags & ~TXD_FLAG_END;
6504 		while (len > TG3_TX_BD_DMA_MAX && *budget) {
6505 			u32 frag_len = TG3_TX_BD_DMA_MAX;
6506 			len -= TG3_TX_BD_DMA_MAX;
6508 			/* Avoid the 8byte DMA problem */
/* If the remainder would be <=8 bytes, split this piece in half instead. */
6510 				len += TG3_TX_BD_DMA_MAX / 2;
6511 				frag_len = TG3_TX_BD_DMA_MAX / 2;
6514 			tnapi->tx_buffers[*entry].fragmented = true;
6516 			tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6517 				      frag_len, tmp_flag, mss, vlan);
6520 			*entry = NEXT_TX(*entry);
6527 			tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6528 				      len, flags, mss, vlan);
6530 			*entry = NEXT_TX(*entry);
/* Ran out of budget: the final piece was not posted, so the previous
 * descriptor is no longer a workaround sub-fragment. */
6533 			tnapi->tx_buffers[prvidx].fragmented = false;
6537 		tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6538 			      len, flags, mss, vlan);
6539 		*entry = NEXT_TX(*entry);
/* Unmap every DMA mapping belonging to the skb at ring position 'entry':
 * the linear head first, then fragments 0..last, skipping the extra
 * descriptors created by the 4K-FIFO split workaround (fragmented flag).
 */
6545 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
6548 	struct sk_buff *skb;
6549 	struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
6554 	pci_unmap_single(tnapi->tp->pdev,
6555 			 dma_unmap_addr(txb, mapping),
6559 	while (txb->fragmented) {
6560 		txb->fragmented = false;
6561 		entry = NEXT_TX(entry);
6562 		txb = &tnapi->tx_buffers[entry];
6565 	for (i = 0; i <= last; i++) {
6566 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6568 		entry = NEXT_TX(entry);
6569 		txb = &tnapi->tx_buffers[entry];
6571 		pci_unmap_page(tnapi->tp->pdev,
6572 			       dma_unmap_addr(txb, mapping),
6573 			       skb_frag_size(frag), PCI_DMA_TODEVICE);
6575 		while (txb->fragmented) {
6576 			txb->fragmented = false;
6577 			entry = NEXT_TX(entry);
6578 			txb = &tnapi->tx_buffers[entry];
6583 /* Workaround 4GB and 40-bit hardware DMA bugs. */
/* Work around the 4GB/40-bit DMA bugs by linearizing the skb into a
 * freshly-allocated copy (with extra headroom for 4-byte alignment on
 * 5701-class parts), remapping it, and re-posting it as a single
 * TXD_FLAG_END descriptor via tg3_tx_frag_set().  *pskb is replaced by
 * the new skb on success (final assignment elided in this excerpt).
 */
6584 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
6585 				       struct sk_buff **pskb,
6586 				       u32 *entry, u32 *budget,
6587 				       u32 base_flags, u32 mss, u32 vlan)
6589 	struct tg3 *tp = tnapi->tp;
6590 	struct sk_buff *new_skb, *skb = *pskb;
6591 	dma_addr_t new_addr = 0;
6594 	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
6595 		new_skb = skb_copy(skb, GFP_ATOMIC);
/* 5701: copy with enough extra headroom to 4-byte-align skb->data. */
6597 		int more_headroom = 4 - ((unsigned long)skb->data & 3);
6599 		new_skb = skb_copy_expand(skb,
6600 					  skb_headroom(skb) + more_headroom,
6601 					  skb_tailroom(skb), GFP_ATOMIC);
6607 		/* New SKB is guaranteed to be linear. */
6608 		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
6610 		/* Make sure the mapping succeeded */
6611 		if (pci_dma_mapping_error(tp->pdev, new_addr)) {
6612 			dev_kfree_skb(new_skb);
6615 			u32 save_entry = *entry;
6617 			base_flags |= TXD_FLAG_END;
6619 			tnapi->tx_buffers[*entry].skb = new_skb;
6620 			dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
/* If even the linear copy trips a hw bug, roll back and drop. */
6623 			if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
6624 					    new_skb->len, base_flags,
6626 				tg3_tx_skb_unmap(tnapi, save_entry, -1);
6627 				dev_kfree_skb(new_skb);
6638 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
6640 /* Use GSO to workaround a rare TSO bug that may be triggered when the
6641 * TSO header is greater than 80 bytes.
/* Software-segments the skb and transmits each resulting segment via
 * tg3_start_xmit(), stopping the queue first if the worst-case
 * fragment estimate would overflow the tx ring.
 */
6643 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
6645 struct sk_buff *segs, *nskb;
6646 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
6648 /* Estimate the number of fragments in the worst case */
6649 if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
6650 netif_stop_queue(tp->dev);
6652 /* netif_tx_stop_queue() must be done before checking
6653 * tx index in tg3_tx_avail() below, because in
6654 * tg3_tx(), we update tx index before checking for
6655 * netif_tx_queue_stopped().
6658 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
6659 return NETDEV_TX_BUSY;
6661 netif_wake_queue(tp->dev);
/* Segment without TSO so each piece fits the hardware's limits. */
6664 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
6666 goto tg3_tso_bug_end;
6672 tg3_start_xmit(nskb, tp->dev);
6678 return NETDEV_TX_OK;
6681 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
6682 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
/* Main transmit entry point: builds base_flags/mss/vlan from the skb,
 * maps the head and fragments, queues descriptors, and falls back to
 * tigon3_dma_hwbug_workaround() when a hardware DMA bug would be hit.
 * NOTE(review): many interior lines (error labels, braces) are elided
 * in this extract.
 */
6684 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
6686 struct tg3 *tp = netdev_priv(dev);
6687 u32 len, entry, base_flags, mss, vlan = 0;
6689 int i = -1, would_hit_hwbug;
6691 struct tg3_napi *tnapi;
6692 struct netdev_queue *txq;
6695 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
6696 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
6697 if (tg3_flag(tp, ENABLE_TSS))
6700 budget = tg3_tx_avail(tnapi);
6702 /* We are running in BH disabled context with netif_tx_lock
6703 * and TX reclaim runs via tp->napi.poll inside of a software
6704 * interrupt. Furthermore, IRQ processing runs lockless so we have
6705 * no IRQ context deadlocks to worry about either. Rejoice!
6707 if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
6708 if (!netif_tx_queue_stopped(txq)) {
6709 netif_tx_stop_queue(txq);
6711 /* This is a hard error, log it. */
6713 "BUG! Tx Ring full when queue awake!\n");
6715 return NETDEV_TX_BUSY;
6718 entry = tnapi->tx_prod;
6720 if (skb->ip_summed == CHECKSUM_PARTIAL)
6721 base_flags |= TXD_FLAG_TCPUDP_CSUM;
6723 mss = skb_shinfo(skb)->gso_size;
/* TSO path: fix up the IP/TCP headers and encode the header length
 * into mss/base_flags in the format each HW_TSO generation expects.
 */
6726 u32 tcp_opt_len, hdr_len;
6728 if (skb_header_cloned(skb) &&
6729 pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
6733 tcp_opt_len = tcp_optlen(skb);
6735 if (skb_is_gso_v6(skb)) {
6736 hdr_len = skb_headlen(skb) - ETH_HLEN;
6740 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
6741 hdr_len = ip_tcp_len + tcp_opt_len;
6744 iph->tot_len = htons(mss + hdr_len);
/* Headers over 80 bytes trip a TSO hardware bug; use the GSO
 * fallback instead.
 */
6747 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
6748 tg3_flag(tp, TSO_BUG))
6749 return tg3_tso_bug(tp, skb);
6751 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
6752 TXD_FLAG_CPU_POST_DMA);
6754 if (tg3_flag(tp, HW_TSO_1) ||
6755 tg3_flag(tp, HW_TSO_2) ||
6756 tg3_flag(tp, HW_TSO_3)) {
6757 tcp_hdr(skb)->check = 0;
6758 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
6760 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
6765 if (tg3_flag(tp, HW_TSO_3)) {
6766 mss |= (hdr_len & 0xc) << 12;
6768 base_flags |= 0x00000010;
6769 base_flags |= (hdr_len & 0x3e0) << 5;
6770 } else if (tg3_flag(tp, HW_TSO_2))
6771 mss |= hdr_len << 9;
6772 else if (tg3_flag(tp, HW_TSO_1) ||
6773 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6774 if (tcp_opt_len || iph->ihl > 5) {
6777 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
6778 mss |= (tsflags << 11);
6781 if (tcp_opt_len || iph->ihl > 5) {
6784 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
6785 base_flags |= tsflags << 12;
6790 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
6791 !mss && skb->len > VLAN_ETH_FRAME_LEN)
6792 base_flags |= TXD_FLAG_JMB_PKT;
6794 if (vlan_tx_tag_present(skb)) {
6795 base_flags |= TXD_FLAG_VLAN;
6796 vlan = vlan_tx_tag_get(skb);
/* Map and queue the linear head. */
6799 len = skb_headlen(skb);
6801 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
6802 if (pci_dma_mapping_error(tp->pdev, mapping))
6806 tnapi->tx_buffers[entry].skb = skb;
6807 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
6809 would_hit_hwbug = 0;
6811 if (tg3_flag(tp, 5701_DMA_BUG))
6812 would_hit_hwbug = 1;
6814 if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
6815 ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
6817 would_hit_hwbug = 1;
6818 /* Now loop through additional data fragments, and queue them. */
6819 } else if (skb_shinfo(skb)->nr_frags > 0) {
6822 if (!tg3_flag(tp, HW_TSO_1) &&
6823 !tg3_flag(tp, HW_TSO_2) &&
6824 !tg3_flag(tp, HW_TSO_3))
6827 last = skb_shinfo(skb)->nr_frags - 1;
6828 for (i = 0; i <= last; i++) {
6829 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6831 len = skb_frag_size(frag);
6832 mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
6833 len, DMA_TO_DEVICE);
6835 tnapi->tx_buffers[entry].skb = NULL;
6836 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
6838 if (dma_mapping_error(&tp->pdev->dev, mapping))
6842 tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
6844 ((i == last) ? TXD_FLAG_END : 0),
6846 would_hit_hwbug = 1;
/* A DMA bug was detected: unmap everything queued so far and retry
 * through the linearizing workaround.
 */
6852 if (would_hit_hwbug) {
6853 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
6855 /* If the workaround fails due to memory/mapping
6856 * failure, silently drop this packet.
6858 entry = tnapi->tx_prod;
6859 budget = tg3_tx_avail(tnapi);
6860 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
6861 base_flags, mss, vlan))
6865 skb_tx_timestamp(skb);
6867 /* Packets are ready, update Tx producer idx local and on card. */
6868 tw32_tx_mbox(tnapi->prodmbox, entry);
6870 tnapi->tx_prod = entry;
6871 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
6872 netif_tx_stop_queue(txq);
6874 /* netif_tx_stop_queue() must be done before checking
6875 * tx index in tg3_tx_avail() below, because in
6876 * tg3_tx(), we update tx index before checking for
6877 * netif_tx_queue_stopped().
6880 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
6881 netif_tx_wake_queue(txq);
6885 return NETDEV_TX_OK;
/* Error path: unmap whatever was queued and drop the skb. */
6888 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
6889 tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
6894 return NETDEV_TX_OK;
/* Enable or disable internal MAC loopback by rewriting tp->mac_mode
 * and flushing it to the MAC_MODE register.
 */
6897 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
6900 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
6901 MAC_MODE_PORT_MODE_MASK);
6903 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
6905 if (!tg3_flag(tp, 5705_PLUS))
6906 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
/* Pick MII vs GMII port mode based on PHY speed capability. */
6908 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
6909 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
6911 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
6913 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
6915 if (tg3_flag(tp, 5705_PLUS) ||
6916 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
6917 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
6918 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
6921 tw32(MAC_MODE, tp->mac_mode);
/* Put the PHY into loopback at the requested @speed (external loopback
 * when @extlpbk), then program a matching MAC port mode.
 * NOTE(review): some branch bodies and closing braces are elided in
 * this extract.
 */
6925 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
6927 u32 val, bmcr, mac_mode, ptest = 0;
6929 tg3_phy_toggle_apd(tp, false);
6930 tg3_phy_toggle_automdix(tp, 0);
6932 if (extlpbk && tg3_phy_set_extloopbk(tp))
6935 bmcr = BMCR_FULLDPLX;
6940 bmcr |= BMCR_SPEED100;
/* FET PHYs cap at 100Mb; others may run gigabit loopback. */
6944 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
6946 bmcr |= BMCR_SPEED100;
6949 bmcr |= BMCR_SPEED1000;
6954 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
6955 tg3_readphy(tp, MII_CTRL1000, &val);
6956 val |= CTL1000_AS_MASTER |
6957 CTL1000_ENABLE_MASTER;
6958 tg3_writephy(tp, MII_CTRL1000, val);
6960 ptest = MII_TG3_FET_PTEST_TRIM_SEL |
6961 MII_TG3_FET_PTEST_TRIM_2;
6962 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
6965 bmcr |= BMCR_LOOPBACK;
6967 tg3_writephy(tp, MII_BMCR, bmcr);
6969 /* The write needs to be flushed for the FETs */
6970 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
6971 tg3_readphy(tp, MII_BMCR, &bmcr);
6975 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
6976 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
6977 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
6978 MII_TG3_FET_PTEST_FRC_TX_LINK |
6979 MII_TG3_FET_PTEST_FRC_TX_LOCK);
6981 /* The write needs to be flushed for the AC131 */
6982 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
6985 /* Reset to prevent losing 1st rx packet intermittently */
6986 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
6987 tg3_flag(tp, 5780_CLASS)) {
6988 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6990 tw32_f(MAC_RX_MODE, tp->rx_mode);
/* Program the MAC port mode to match the loopback speed. */
6993 mac_mode = tp->mac_mode &
6994 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
6995 if (speed == SPEED_1000)
6996 mac_mode |= MAC_MODE_PORT_MODE_GMII;
6998 mac_mode |= MAC_MODE_PORT_MODE_MII;
7000 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
7001 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
/* 5401/5411 PHYs need opposite link-polarity settings. */
7003 if (masked_phy_id == TG3_PHY_ID_BCM5401)
7004 mac_mode &= ~MAC_MODE_LINK_POLARITY;
7005 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
7006 mac_mode |= MAC_MODE_LINK_POLARITY;
7008 tg3_writephy(tp, MII_TG3_EXT_CTRL,
7009 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
7012 tw32(MAC_MODE, mac_mode);
/* ethtool/feature hook: toggle internal MAC loopback based on the
 * NETIF_F_LOOPBACK feature bit, under tp->lock.  No-op if already in
 * the requested state.
 */
7018 static void tg3_set_loopback(struct net_device *dev, u32 features)
7020 struct tg3 *tp = netdev_priv(dev);
7022 if (features & NETIF_F_LOOPBACK) {
7023 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
7026 spin_lock_bh(&tp->lock);
7027 tg3_mac_loopback(tp, true);
7028 netif_carrier_on(tp->dev);
7029 spin_unlock_bh(&tp->lock);
7030 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
7032 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
7035 spin_lock_bh(&tp->lock);
7036 tg3_mac_loopback(tp, false);
7037 /* Force link status check */
7038 tg3_setup_phy(tp, 1);
7039 spin_unlock_bh(&tp->lock);
7040 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
/* ndo_fix_features: 5780-class chips cannot do TSO with jumbo MTU,
 * so strip all TSO feature bits in that configuration.
 */
7044 static u32 tg3_fix_features(struct net_device *dev, u32 features)
7046 struct tg3 *tp = netdev_priv(dev);
7048 if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
7049 features &= ~NETIF_F_ALL_TSO;
/* ndo_set_features: only the LOOPBACK bit needs action, and only while
 * the interface is running.
 */
7054 static int tg3_set_features(struct net_device *dev, u32 features)
7056 u32 changed = dev->features ^ features;
7058 if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
7059 tg3_set_loopback(dev, features);
/* Record a new MTU and flip the jumbo-ring / TSO-capable flags to
 * match.  On 5780-class chips TSO and jumbo frames are mutually
 * exclusive, so features are re-evaluated via netdev_update_features().
 */
7064 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
7069 if (new_mtu > ETH_DATA_LEN) {
7070 if (tg3_flag(tp, 5780_CLASS)) {
7071 netdev_update_features(dev);
7072 tg3_flag_clear(tp, TSO_CAPABLE);
7074 tg3_flag_set(tp, JUMBO_RING_ENABLE);
7077 if (tg3_flag(tp, 5780_CLASS)) {
7078 tg3_flag_set(tp, TSO_CAPABLE);
7079 netdev_update_features(dev);
7081 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
/* ndo_change_mtu: validate the new MTU; if the device is down just
 * record it, otherwise halt and restart the hardware with the new
 * setting under the full lock.
 * NOTE(review): netif stop/error-return lines are elided in this
 * extract.
 */
7085 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
7087 struct tg3 *tp = netdev_priv(dev);
7090 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
7093 if (!netif_running(dev)) {
7094 /* We'll just catch it later when the
7097 tg3_set_mtu(dev, tp, new_mtu);
7105 tg3_set_mtu(dev, tp, new_mtu);
7107 tg3_full_lock(tp, 1);
7109 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7111 err = tg3_restart_hw(tp, 0);
7114 tg3_netif_start(tp);
7116 tg3_full_unlock(tp);
/* Free all rx buffers attached to a producer ring set.  For secondary
 * (per-vector) rings only the cons..prod window holds buffers; the
 * primary ring frees every slot.
 */
7124 static void tg3_rx_prodring_free(struct tg3 *tp,
7125 struct tg3_rx_prodring_set *tpr)
7129 if (tpr != &tp->napi[0].prodring) {
7130 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
7131 i = (i + 1) & tp->rx_std_ring_mask)
7132 tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
7135 if (tg3_flag(tp, JUMBO_CAPABLE)) {
7136 for (i = tpr->rx_jmb_cons_idx;
7137 i != tpr->rx_jmb_prod_idx;
7138 i = (i + 1) & tp->rx_jmb_ring_mask) {
7139 tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
/* Primary ring: free every standard (and, when present, jumbo) slot. */
7147 for (i = 0; i <= tp->rx_std_ring_mask; i++)
7148 tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
7151 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7152 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
7153 tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
7158 /* Initialize rx rings for packet processing.
7160 * The chip has been shut down and the driver detached from
7161 * the networking, so no interrupts or new tx packets will
7162 * end up in the driver. tp->{tx,}lock are held and thus
/* Zeroes descriptors, sets ring invariants, and allocates fresh skbs
 * for the standard (and, when enabled, jumbo) producer rings.  On a
 * partial standard-ring allocation the pending count is shrunk; other
 * failures unwind via tg3_rx_prodring_free().
 */
7165 static int tg3_rx_prodring_alloc(struct tg3 *tp,
7166 struct tg3_rx_prodring_set *tpr)
7168 u32 i, rx_pkt_dma_sz;
7170 tpr->rx_std_cons_idx = 0;
7171 tpr->rx_std_prod_idx = 0;
7172 tpr->rx_jmb_cons_idx = 0;
7173 tpr->rx_jmb_prod_idx = 0;
/* Secondary rings share buffers with the primary; just clear state. */
7175 if (tpr != &tp->napi[0].prodring) {
7176 memset(&tpr->rx_std_buffers[0], 0,
7177 TG3_RX_STD_BUFF_RING_SIZE(tp));
7178 if (tpr->rx_jmb_buffers)
7179 memset(&tpr->rx_jmb_buffers[0], 0,
7180 TG3_RX_JMB_BUFF_RING_SIZE(tp));
7184 /* Zero out all descriptors. */
7185 memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
7187 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
7188 if (tg3_flag(tp, 5780_CLASS) &&
7189 tp->dev->mtu > ETH_DATA_LEN)
7190 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
7191 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
7193 /* Initialize invariants of the rings, we only set this
7194 * stuff once. This works because the card does not
7195 * write into the rx buffer posting rings.
7197 for (i = 0; i <= tp->rx_std_ring_mask; i++) {
7198 struct tg3_rx_buffer_desc *rxd;
7200 rxd = &tpr->rx_std[i];
7201 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
7202 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
7203 rxd->opaque = (RXD_OPAQUE_RING_STD |
7204 (i << RXD_OPAQUE_INDEX_SHIFT));
7207 /* Now allocate fresh SKBs for each rx ring. */
7208 for (i = 0; i < tp->rx_pending; i++) {
7209 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
7210 netdev_warn(tp->dev,
7211 "Using a smaller RX standard ring. Only "
7212 "%d out of %d buffers were allocated "
7213 "successfully\n", i, tp->rx_pending);
/* Jumbo ring setup, only on jumbo-capable non-5780-class chips. */
7221 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
7224 memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
7226 if (!tg3_flag(tp, JUMBO_RING_ENABLE))
7229 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
7230 struct tg3_rx_buffer_desc *rxd;
7232 rxd = &tpr->rx_jmb[i].std;
7233 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
7234 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
7236 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
7237 (i << RXD_OPAQUE_INDEX_SHIFT));
7240 for (i = 0; i < tp->rx_jumbo_pending; i++) {
7241 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
7242 netdev_warn(tp->dev,
7243 "Using a smaller RX jumbo ring. Only %d "
7244 "out of %d buffers were allocated "
7245 "successfully\n", i, tp->rx_jumbo_pending);
7248 tp->rx_jumbo_pending = i;
/* Error path: release anything allocated so far. */
7257 tg3_rx_prodring_free(tp, tpr);
/* Release the buffer bookkeeping arrays and the coherent descriptor
 * rings of a producer ring set.  Safe on partially-initialized sets
 * (kfree(NULL) is a no-op; elided guards cover the coherent frees).
 */
7261 static void tg3_rx_prodring_fini(struct tg3 *tp,
7262 struct tg3_rx_prodring_set *tpr)
7264 kfree(tpr->rx_std_buffers);
7265 tpr->rx_std_buffers = NULL;
7266 kfree(tpr->rx_jmb_buffers);
7267 tpr->rx_jmb_buffers = NULL;
7269 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
7270 tpr->rx_std, tpr->rx_std_mapping);
7274 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
7275 tpr->rx_jmb, tpr->rx_jmb_mapping);
/* Allocate the buffer bookkeeping arrays and coherent descriptor rings
 * for one producer ring set; jumbo resources only on jumbo-capable
 * non-5780-class chips.  Failures unwind via tg3_rx_prodring_fini().
 * NOTE(review): error-return lines are elided in this extract.
 */
7280 static int tg3_rx_prodring_init(struct tg3 *tp,
7281 struct tg3_rx_prodring_set *tpr)
7283 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
7285 if (!tpr->rx_std_buffers)
7288 tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
7289 TG3_RX_STD_RING_BYTES(tp),
7290 &tpr->rx_std_mapping,
7295 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7296 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
7298 if (!tpr->rx_jmb_buffers)
7301 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
7302 TG3_RX_JMB_RING_BYTES(tp),
7303 &tpr->rx_jmb_mapping,
7312 tg3_rx_prodring_fini(tp, tpr);
7316 /* Free up pending packets in all rx/tx rings.
7318 * The chip has been shut down and the driver detached from
7319 * the networking, so no interrupts or new tx packets will
7320 * end up in the driver. tp->{tx,}lock is not held and we are not
7321 * in an interrupt context and thus may sleep.
7323 static void tg3_free_rings(struct tg3 *tp)
/* Per interrupt vector: drop rx producer buffers, then unmap and free
 * every skb still sitting in the tx ring.
 */
7327 for (j = 0; j < tp->irq_cnt; j++) {
7328 struct tg3_napi *tnapi = &tp->napi[j];
7330 tg3_rx_prodring_free(tp, &tnapi->prodring);
7332 if (!tnapi->tx_buffers)
7335 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
7336 struct sk_buff *skb = tnapi->tx_buffers[i].skb;
7341 tg3_tx_skb_unmap(tnapi, i,
7342 skb_shinfo(skb)->nr_frags - 1);
7344 dev_kfree_skb_any(skb);
7349 /* Initialize tx/rx rings for packet processing.
7351 * The chip has been shut down and the driver detached from
7352 * the networking, so no interrupts or new tx packets will
7353 * end up in the driver. tp->{tx,}lock are held and thus
7356 static int tg3_init_rings(struct tg3 *tp)
7360 /* Free up all the SKBs. */
/* Reset per-vector status blocks, tx/rx completion state and refill
 * each producer ring; a prodring allocation failure aborts setup.
 */
7363 for (i = 0; i < tp->irq_cnt; i++) {
7364 struct tg3_napi *tnapi = &tp->napi[i];
7366 tnapi->last_tag = 0;
7367 tnapi->last_irq_tag = 0;
7368 tnapi->hw_status->status = 0;
7369 tnapi->hw_status->status_tag = 0;
7370 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7375 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
7377 tnapi->rx_rcb_ptr = 0;
7379 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7381 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
7391 * Must not be invoked with interrupt sources disabled and
7392 * the hardware shutdown down.
/* Release every coherent DMA area and kmalloc'd table allocated by
 * tg3_alloc_consistent(): per-vector tx ring + buffers, rx return
 * ring, status block, producer rings, and the shared statistics block.
 */
7394 static void tg3_free_consistent(struct tg3 *tp)
7398 for (i = 0; i < tp->irq_cnt; i++) {
7399 struct tg3_napi *tnapi = &tp->napi[i];
7401 if (tnapi->tx_ring) {
7402 dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
7403 tnapi->tx_ring, tnapi->tx_desc_mapping);
7404 tnapi->tx_ring = NULL;
7407 kfree(tnapi->tx_buffers);
7408 tnapi->tx_buffers = NULL;
7410 if (tnapi->rx_rcb) {
7411 dma_free_coherent(&tp->pdev->dev,
7412 TG3_RX_RCB_RING_BYTES(tp),
7414 tnapi->rx_rcb_mapping);
7415 tnapi->rx_rcb = NULL;
7418 tg3_rx_prodring_fini(tp, &tnapi->prodring);
7420 if (tnapi->hw_status) {
7421 dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
7423 tnapi->status_mapping);
7424 tnapi->hw_status = NULL;
7429 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
7430 tp->hw_stats, tp->stats_mapping);
7431 tp->hw_stats = NULL;
7436 * Must not be invoked with interrupt sources disabled and
7437 * the hardware shutdown down. Can sleep.
/* Allocate all coherent DMA state: the stats block, then per vector a
 * status block, producer rings, and (depending on TSS/RSS vector
 * roles) a tx ring and rx return ring.  Any failure unwinds through
 * tg3_free_consistent().
 * NOTE(review): error-goto lines are elided in this extract.
 */
7439 static int tg3_alloc_consistent(struct tg3 *tp)
7443 tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
7444 sizeof(struct tg3_hw_stats),
7450 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
7452 for (i = 0; i < tp->irq_cnt; i++) {
7453 struct tg3_napi *tnapi = &tp->napi[i];
7454 struct tg3_hw_status *sblk;
7456 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
7458 &tnapi->status_mapping,
7460 if (!tnapi->hw_status)
7463 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7464 sblk = tnapi->hw_status;
7466 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
7469 /* If multivector TSS is enabled, vector 0 does not handle
7470 * tx interrupts. Don't allocate any resources for it.
7472 if ((!i && !tg3_flag(tp, ENABLE_TSS)) ||
7473 (i && tg3_flag(tp, ENABLE_TSS))) {
7474 tnapi->tx_buffers = kzalloc(
7475 sizeof(struct tg3_tx_ring_info) *
7476 TG3_TX_RING_SIZE, GFP_KERNEL);
7477 if (!tnapi->tx_buffers)
7480 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
7482 &tnapi->tx_desc_mapping,
7484 if (!tnapi->tx_ring)
7489 * When RSS is enabled, the status block format changes
7490 * slightly. The "rx_jumbo_consumer", "reserved",
7491 * and "rx_mini_consumer" members get mapped to the
7492 * other three rx return ring producer indexes.
7496 if (tg3_flag(tp, ENABLE_RSS)) {
7497 tnapi->rx_rcb_prod_idx = NULL;
7502 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
7505 tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
7508 tnapi->rx_rcb_prod_idx = &sblk->reserved;
7511 tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
7516 * If multivector RSS is enabled, vector 0 does not handle
7517 * rx or tx interrupts. Don't allocate any resources for it.
7519 if (!i && tg3_flag(tp, ENABLE_RSS))
7522 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
7523 TG3_RX_RCB_RING_BYTES(tp),
7524 &tnapi->rx_rcb_mapping,
7529 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
/* Error path: release everything allocated so far. */
7535 tg3_free_consistent(tp);
7539 #define MAX_WAIT_CNT 1000
7541 /* To stop a block, clear the enable bit and poll till it
7542 * clears. tp->lock is held.
/* Returns non-zero (via the elided tail) when the enable bit fails to
 * clear within MAX_WAIT_CNT polls; @silent suppresses the error log.
 */
7544 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
7549 if (tg3_flag(tp, 5705_PLUS)) {
7556 /* We can't enable/disable these bits of the
7557 * 5705/5750, just say success.
7570 for (i = 0; i < MAX_WAIT_CNT; i++) {
7573 if ((val & enable_bit) == 0)
7577 if (i == MAX_WAIT_CNT && !silent) {
7578 dev_err(&tp->pdev->dev,
7579 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
7587 /* tp->lock is held. */
/* Quiesce the hardware: disable interrupts, stop the rx path, all
 * rx/tx DMA engine blocks, the MAC tx engine, host coalescing and
 * memory blocks, then clear the status and statistics blocks.
 * Accumulates stop-block failures into err (returned via elided tail).
 */
7588 static int tg3_abort_hw(struct tg3 *tp, int silent)
7592 tg3_disable_ints(tp);
7594 tp->rx_mode &= ~RX_MODE_ENABLE;
7595 tw32_f(MAC_RX_MODE, tp->rx_mode);
7598 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
7599 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
7600 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
7601 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
7602 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
7603 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
7605 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
7606 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
7607 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
7608 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
7609 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
7610 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
7611 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
7613 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
7614 tw32_f(MAC_MODE, tp->mac_mode);
7617 tp->tx_mode &= ~TX_MODE_ENABLE;
7618 tw32_f(MAC_TX_MODE, tp->tx_mode);
/* TX_MODE has no stop-block helper; poll the enable bit directly. */
7620 for (i = 0; i < MAX_WAIT_CNT; i++) {
7622 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
7625 if (i >= MAX_WAIT_CNT) {
7626 dev_err(&tp->pdev->dev,
7627 "%s timed out, TX_MODE_ENABLE will not clear "
7628 "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
7632 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
7633 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
7634 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
7636 tw32(FTQ_RESET, 0xffffffff);
7637 tw32(FTQ_RESET, 0x00000000);
7639 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
7640 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
7642 for (i = 0; i < tp->irq_cnt; i++) {
7643 struct tg3_napi *tnapi = &tp->napi[i];
7644 if (tnapi->hw_status)
7645 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7648 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
7653 /* Save PCI command register before chip reset */
7654 static void tg3_save_pci_state(struct tg3 *tp)
7656 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
7659 /* Restore PCI state after chip reset */
/* Re-enables indirect register access, restores PCI command, PCISTATE,
 * read-request size / cache line / latency, clears PCI-X relaxed
 * ordering, and re-arms MSI on 5780-class chips (reset clears it).
 */
7660 static void tg3_restore_pci_state(struct tg3 *tp)
7664 /* Re-enable indirect register accesses. */
7665 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7666 tp->misc_host_ctrl);
7668 /* Set MAX PCI retry to zero. */
7669 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
7670 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7671 tg3_flag(tp, PCIX_MODE))
7672 val |= PCISTATE_RETRY_SAME_DMA;
7673 /* Allow reads and writes to the APE register and memory space. */
7674 if (tg3_flag(tp, ENABLE_APE))
7675 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7676 PCISTATE_ALLOW_APE_SHMEM_WR |
7677 PCISTATE_ALLOW_APE_PSPACE_WR;
7678 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
7680 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
7682 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
7683 if (tg3_flag(tp, PCI_EXPRESS))
7684 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
7686 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
7687 tp->pci_cacheline_sz);
7688 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
7693 /* Make sure PCI-X relaxed ordering bit is clear. */
7694 if (tg3_flag(tp, PCIX_MODE)) {
7697 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7699 pcix_cmd &= ~PCI_X_CMD_ERO;
7700 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7704 if (tg3_flag(tp, 5780_CLASS)) {
7706 /* Chip reset on 5780 will reset MSI enable bit,
7707 * so need to restore it.
7709 if (tg3_flag(tp, USING_MSI)) {
7712 pci_read_config_word(tp->pdev,
7713 tp->msi_cap + PCI_MSI_FLAGS,
7715 pci_write_config_word(tp->pdev,
7716 tp->msi_cap + PCI_MSI_FLAGS,
7717 ctrl | PCI_MSI_FLAGS_ENABLE);
7718 val = tr32(MSGINT_MODE);
7719 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
7724 /* tp->lock is held. */
/* Full chip reset: save PCI state, quiesce irq handling, issue the
 * GRC core-clock reset, restore PCI/PCIe config, re-init the MAC mode
 * and re-probe ASF state from NVRAM-backed shared memory.
 * NOTE(review): many interior lines (delays, braces, intermediate
 * register reads) are elided in this extract.
 */
7725 static int tg3_chip_reset(struct tg3 *tp)
7728 void (*write_op)(struct tg3 *, u32, u32);
7733 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
7735 /* No matching tg3_nvram_unlock() after this because
7736 * chip reset below will undo the nvram lock.
7738 tp->nvram_lock_cnt = 0;
7740 /* GRC_MISC_CFG core clock reset will clear the memory
7741 * enable bit in PCI register 4 and the MSI enable bit
7742 * on some chips, so we save relevant registers here.
7744 tg3_save_pci_state(tp);
7746 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
7747 tg3_flag(tp, 5755_PLUS))
7748 tw32(GRC_FASTBOOT_PC, 0);
7751 * We must avoid the readl() that normally takes place.
7752 * It locks machines, causes machine checks, and other
7753 * fun things. So, temporarily disable the 5701
7754 * hardware workaround, while we do the reset.
7756 write_op = tp->write32;
7757 if (write_op == tg3_write_flush_reg32)
7758 tp->write32 = tg3_write32;
7760 /* Prevent the irq handler from reading or writing PCI registers
7761 * during chip reset when the memory enable bit in the PCI command
7762 * register may be cleared. The chip does not generate interrupt
7763 * at this time, but the irq handler may still be called due to irq
7764 * sharing or irqpoll.
7766 tg3_flag_set(tp, CHIP_RESETTING);
7767 for (i = 0; i < tp->irq_cnt; i++) {
7768 struct tg3_napi *tnapi = &tp->napi[i];
7769 if (tnapi->hw_status) {
7770 tnapi->hw_status->status = 0;
7771 tnapi->hw_status->status_tag = 0;
7773 tnapi->last_tag = 0;
7774 tnapi->last_irq_tag = 0;
7778 for (i = 0; i < tp->irq_cnt; i++)
7779 synchronize_irq(tp->napi[i].irq_vec);
7781 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7782 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
7783 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
7787 val = GRC_MISC_CFG_CORECLK_RESET;
7789 if (tg3_flag(tp, PCI_EXPRESS)) {
7790 /* Force PCIe 1.0a mode */
7791 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7792 !tg3_flag(tp, 57765_PLUS) &&
7793 tr32(TG3_PCIE_PHY_TSTCTL) ==
7794 (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
7795 tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
7797 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
7798 tw32(GRC_MISC_CFG, (1 << 29));
7803 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7804 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
7805 tw32(GRC_VCPU_EXT_CTRL,
7806 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
7809 /* Manage gphy power for all CPMU absent PCIe devices. */
7810 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
7811 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
/* This write performs the actual chip reset. */
7813 tw32(GRC_MISC_CFG, val)
7815 /* restore 5701 hardware bug workaround write method */
7816 tp->write32 = write_op;
7818 /* Unfortunately, we have to delay before the PCI read back.
7819 * Some 575X chips even will not respond to a PCI cfg access
7820 * when the reset command is given to the chip.
7822 * How do these hardware designers expect things to work
7823 * properly if the PCI write is posted for a long period
7824 * of time? It is always necessary to have some method by
7825 * which a register read back can occur to push the write
7826 * out which does the reset.
7828 * For most tg3 variants the trick below was working.
7833 /* Flush PCI posted writes. The normal MMIO registers
7834 * are inaccessible at this time so this is the only
7835 * way to make this reliably (actually, this is no longer
7836 * the case, see above). I tried to use indirect
7837 * register read/write but this upset some 5701 variants.
7839 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
7843 if (tg3_flag(tp, PCI_EXPRESS) && pci_pcie_cap(tp->pdev)) {
7846 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
7850 /* Wait for link training to complete. */
7851 for (i = 0; i < 5000; i++)
7854 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
7855 pci_write_config_dword(tp->pdev, 0xc4,
7856 cfg_val | (1 << 15));
7859 /* Clear the "no snoop" and "relaxed ordering" bits. */
7860 pci_read_config_word(tp->pdev,
7861 pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
7863 val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
7864 PCI_EXP_DEVCTL_NOSNOOP_EN);
7866 * Older PCIe devices only support the 128 byte
7867 * MPS setting. Enforce the restriction.
7869 if (!tg3_flag(tp, CPMU_PRESENT))
7870 val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
7871 pci_write_config_word(tp->pdev,
7872 pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
7875 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
7877 /* Clear error status */
7878 pci_write_config_word(tp->pdev,
7879 pci_pcie_cap(tp->pdev) + PCI_EXP_DEVSTA,
7880 PCI_EXP_DEVSTA_CED |
7881 PCI_EXP_DEVSTA_NFED |
7882 PCI_EXP_DEVSTA_FED |
7883 PCI_EXP_DEVSTA_URD);
7886 tg3_restore_pci_state(tp);
7888 tg3_flag_clear(tp, CHIP_RESETTING);
7889 tg3_flag_clear(tp, ERROR_PROCESSED);
7892 if (tg3_flag(tp, 5780_CLASS))
7893 val = tr32(MEMARB_MODE);
7894 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
7896 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
7898 tw32(0x5000, 0x400);
7901 tw32(GRC_MODE, tp->grc_mode);
7903 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
7906 tw32(0xc4, val | (1 << 15));
7909 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
7910 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7911 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
7912 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
7913 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
7914 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7917 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
7918 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
7920 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
7921 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
7926 tw32_f(MAC_MODE, val);
7929 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
7931 err = tg3_poll_fw(tp);
7937 if (tg3_flag(tp, PCI_EXPRESS) &&
7938 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
7939 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7940 !tg3_flag(tp, 57765_PLUS)) {
7943 tw32(0x7c00, val | (1 << 25));
7946 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
7947 val = tr32(TG3_CPMU_CLCK_ORIDE);
7948 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
7951 /* Reprobe ASF enable state. */
7952 tg3_flag_clear(tp, ENABLE_ASF);
7953 tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
7954 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
7955 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
7958 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
7959 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
7960 tg3_flag_set(tp, ENABLE_ASF);
7961 tp->last_event_jiffies = jiffies;
7962 if (tg3_flag(tp, 5750_PLUS))
7963 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
7970 /* tp->lock is held. */
/* Orderly shutdown: signal firmware pre-reset, abort the hardware,
 * reset the chip, restore the MAC address, then signal post-reset.
 */
7971 static int tg3_halt(struct tg3 *tp, int kind, int silent)
7977 tg3_write_sig_pre_reset(tp, kind);
7979 tg3_abort_hw(tp, silent);
7980 err = tg3_chip_reset(tp);
7982 __tg3_set_mac_addr(tp, 0);
7984 tg3_write_sig_legacy(tp, kind);
7985 tg3_write_sig_post_reset(tp, kind);
/*
 * tg3_set_mac_addr - ndo_set_mac_address handler.
 *
 * Validates the requested address, copies it into the netdev, and — if
 * the interface is running — programs it into the MAC address registers
 * under tp->lock.  When ASF management firmware is enabled, MAC address
 * slot 1 is left untouched if firmware appears to be using it for its
 * own (different, non-zero) address.  NOTE(review): the early-return
 * bodies after the validity and netif_running() checks, the
 * skip_mac_1 = 1 assignment, and the final return are elided from this
 * view — confirm against the full file.
 */
7993 static int tg3_set_mac_addr(struct net_device *dev, void *p)
7995 struct tg3 *tp = netdev_priv(dev);
7996 struct sockaddr *addr = p;
7997 int err = 0, skip_mac_1 = 0;
7999 if (!is_valid_ether_addr(addr->sa_data))
8002 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
8004 if (!netif_running(dev))
8007 if (tg3_flag(tp, ENABLE_ASF)) {
8008 u32 addr0_high, addr0_low, addr1_high, addr1_low;
/* Read back both hardware MAC address slots to compare them. */
8010 addr0_high = tr32(MAC_ADDR_0_HIGH);
8011 addr0_low = tr32(MAC_ADDR_0_LOW);
8012 addr1_high = tr32(MAC_ADDR_1_HIGH);
8013 addr1_low = tr32(MAC_ADDR_1_LOW);
8015 /* Skip MAC addr 1 if ASF is using it. */
8016 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
8017 !(addr1_high == 0 && addr1_low == 0))
/* Program the new address into the hardware under the device lock. */
8020 spin_lock_bh(&tp->lock);
8021 __tg3_set_mac_addr(tp, skip_mac_1);
8022 spin_unlock_bh(&tp->lock);
8027 /* tp->lock is held. */
/*
 * tg3_set_bdinfo - program one TG3_BDINFO ring-control block in NIC SRAM.
 * @tp:           device private state
 * @bdinfo_addr:  SRAM base of the BDINFO structure to fill
 * @mapping:      host DMA address of the ring (split into high/low words)
 * @maxlen_flags: (max buffer length << 16) | ring attribute flags
 *
 * Writes the high and low halves of the host ring address, then the
 * maxlen/flags word, and — only on pre-5705 devices — the NIC-side
 * descriptor address.  NOTE(review): the tg3_write_mem(tp, ...) call
 * heads and the final nic_addr parameter line are elided from this
 * view; only the argument lines are visible.
 */
8028 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
8029 dma_addr_t mapping, u32 maxlen_flags,
8033 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
8034 ((u64) mapping >> 32));
8036 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
8037 ((u64) mapping & 0xffffffff));
8039 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
/* Older (pre-5705) chips also need the descriptor location in NIC SRAM. */
8042 if (!tg3_flag(tp, 5705_PLUS))
8044 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
8048 static void __tg3_set_rx_mode(struct net_device *);
/*
 * __tg3_set_coalesce - program the host coalescing engine from ethtool
 * parameters.
 *
 * Vector 0 (default) registers: TX coalescing values are written only
 * when TSS (multi-queue TX status) is disabled, and RX values only when
 * RSS is disabled — in the multi-queue cases the vector-0 registers are
 * zeroed because the per-vector registers below take over.  On
 * pre-5705_PLUS chips the per-IRQ tick and statistics-block intervals
 * are also programmed; the stats interval appears to be forced when the
 * carrier is down (the assignment under the netif_carrier_ok() test is
 * elided — confirm).  Finally the per-MSI-X-vector register banks
 * (stride 0x18) are programmed for active vectors and zeroed for the
 * remaining unused ones.
 */
8049 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
8053 if (!tg3_flag(tp, ENABLE_TSS)) {
8054 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
8055 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
8056 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
/* TSS enabled: default-vector TX coalescing registers are cleared. */
8058 tw32(HOSTCC_TXCOL_TICKS, 0);
8059 tw32(HOSTCC_TXMAX_FRAMES, 0);
8060 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
8063 if (!tg3_flag(tp, ENABLE_RSS)) {
8064 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
8065 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
8066 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
/* RSS enabled: default-vector RX coalescing registers are cleared. */
8068 tw32(HOSTCC_RXCOL_TICKS, 0);
8069 tw32(HOSTCC_RXMAX_FRAMES, 0);
8070 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
/* Pre-5705 chips have extra per-IRQ tick and stats-block intervals. */
8073 if (!tg3_flag(tp, 5705_PLUS)) {
8074 u32 val = ec->stats_block_coalesce_usecs;
8076 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
8077 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
8079 if (!netif_carrier_ok(tp->dev))
8082 tw32(HOSTCC_STAT_COAL_TICKS, val);
/* Program the per-vector register banks (stride 0x18 per vector). */
8085 for (i = 0; i < tp->irq_cnt - 1; i++) {
8088 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
8089 tw32(reg, ec->rx_coalesce_usecs);
8090 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
8091 tw32(reg, ec->rx_max_coalesced_frames);
8092 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
8093 tw32(reg, ec->rx_max_coalesced_frames_irq);
8095 if (tg3_flag(tp, ENABLE_TSS)) {
8096 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
8097 tw32(reg, ec->tx_coalesce_usecs);
8098 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
8099 tw32(reg, ec->tx_max_coalesced_frames);
8100 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
8101 tw32(reg, ec->tx_max_coalesced_frames_irq);
/* Zero the banks of any vectors beyond the active count. */
8105 for (; i < tp->irq_max - 1; i++) {
8106 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
8107 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
8108 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8110 if (tg3_flag(tp, ENABLE_TSS)) {
8111 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
8112 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
8113 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8118 /* tp->lock is held. */
/*
 * tg3_rings_reset - put all send / receive-return rings and mailboxes
 * into a clean post-reset state.
 *
 * Disables every TX and RX-return ring-control block except the first
 * (the per-chip-family SRAM limits differ), acks/clears the interrupt
 * mailboxes, zeroes the producer/consumer mailboxes for every vector,
 * disables the NIC-based send BD mailboxes on pre-5705 chips, then
 * re-programs the status-block DMA address and the first TX / RX-return
 * BDINFO blocks, followed by the per-MSI-X-vector status blocks and
 * rings.  NOTE(review): several closing braces and else-branch
 * boundaries are elided from this view; statement grouping is inferred.
 */
8119 static void tg3_rings_reset(struct tg3 *tp)
8122 u32 stblk, txrcb, rxrcb, limit;
8123 struct tg3_napi *tnapi = &tp->napi[0];
8125 /* Disable all transmit rings but the first. */
/* SRAM send-RCB count varies by family: 16 / 4 / 2 / 1 entries. */
8126 if (!tg3_flag(tp, 5705_PLUS))
8127 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
8128 else if (tg3_flag(tp, 5717_PLUS))
8129 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
8130 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8131 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
8133 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8135 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8136 txrcb < limit; txrcb += TG3_BDINFO_SIZE)
8137 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
8138 BDINFO_FLAGS_DISABLED);
8141 /* Disable all receive return rings but the first. */
8142 if (tg3_flag(tp, 5717_PLUS))
8143 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
8144 else if (!tg3_flag(tp, 5705_PLUS))
8145 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
8146 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8147 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8148 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
8150 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8152 for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8153 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
8154 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
8155 BDINFO_FLAGS_DISABLED);
8157 /* Disable interrupts */
/* Writing 1 to the interrupt mailbox masks further interrupts. */
8158 tw32_mailbox_f(tp->napi[0].int_mbox, 1);
8159 tp->napi[0].chk_msi_cnt = 0;
8160 tp->napi[0].last_rx_cons = 0;
8161 tp->napi[0].last_tx_cons = 0;
8163 /* Zero mailbox registers. */
8164 if (tg3_flag(tp, SUPPORT_MSIX)) {
8165 for (i = 1; i < tp->irq_max; i++) {
8166 tp->napi[i].tx_prod = 0;
8167 tp->napi[i].tx_cons = 0;
8168 if (tg3_flag(tp, ENABLE_TSS))
8169 tw32_mailbox(tp->napi[i].prodmbox, 0);
8170 tw32_rx_mbox(tp->napi[i].consmbox, 0);
8171 tw32_mailbox_f(tp->napi[i].int_mbox, 1);
8172 tp->napi[i].chk_msi_cnt = 0;
8173 tp->napi[i].last_rx_cons = 0;
8174 tp->napi[i].last_tx_cons = 0;
/* Without TSS the vector-0 TX producer mailbox is cleared here. */
8176 if (!tg3_flag(tp, ENABLE_TSS))
8177 tw32_mailbox(tp->napi[0].prodmbox, 0);
/* Non-MSI-X path: reset vector-0 state and mailboxes directly. */
8179 tp->napi[0].tx_prod = 0;
8180 tp->napi[0].tx_cons = 0;
8181 tw32_mailbox(tp->napi[0].prodmbox, 0);
8182 tw32_rx_mbox(tp->napi[0].consmbox, 0);
8185 /* Make sure the NIC-based send BD rings are disabled. */
8186 if (!tg3_flag(tp, 5705_PLUS)) {
8187 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
8188 for (i = 0; i < 16; i++)
8189 tw32_tx_mbox(mbox + i * 8, 0);
8192 txrcb = NIC_SRAM_SEND_RCB;
8193 rxrcb = NIC_SRAM_RCV_RET_RCB;
8195 /* Clear status block in ram. */
8196 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8198 /* Set status block DMA address */
8199 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8200 ((u64) tnapi->status_mapping >> 32));
8201 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8202 ((u64) tnapi->status_mapping & 0xffffffff));
/* Re-enable the first TX and RX-return rings via their BDINFO blocks. */
8204 if (tnapi->tx_ring) {
8205 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8206 (TG3_TX_RING_SIZE <<
8207 BDINFO_FLAGS_MAXLEN_SHIFT),
8208 NIC_SRAM_TX_BUFFER_DESC);
8209 txrcb += TG3_BDINFO_SIZE;
8212 if (tnapi->rx_rcb) {
8213 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8214 (tp->rx_ret_ring_mask + 1) <<
8215 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
8216 rxrcb += TG3_BDINFO_SIZE;
/* Per-vector status blocks live at HOSTCC_STATBLCK_RING1 onward. */
8219 stblk = HOSTCC_STATBLCK_RING1;
8221 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
8222 u64 mapping = (u64)tnapi->status_mapping;
8223 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
8224 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
8226 /* Clear status block in ram. */
8227 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8229 if (tnapi->tx_ring) {
8230 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8231 (TG3_TX_RING_SIZE <<
8232 BDINFO_FLAGS_MAXLEN_SHIFT),
8233 NIC_SRAM_TX_BUFFER_DESC);
8234 txrcb += TG3_BDINFO_SIZE;
8237 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8238 ((tp->rx_ret_ring_mask + 1) <<
8239 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
8242 rxrcb += TG3_BDINFO_SIZE;
/*
 * tg3_setup_rxbd_thresholds - program RX buffer-descriptor replenish
 * thresholds.
 *
 * Picks the standard-ring BD cache size for the chip family, then sets
 * the standard replenish threshold to the smaller of half the cache and
 * the host-side threshold (rx_pending / 8, at least 1).  On 57765_PLUS
 * parts the low-water mark register is also written.  The jumbo-ring
 * thresholds are programmed the same way, but only for jumbo-capable,
 * non-5780-class devices — the early return after that check is elided
 * from this view (NOTE(review): confirm against the full file).
 */
8246 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
8248 u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
/* Standard-ring BD cache size depends on the chip generation. */
8250 if (!tg3_flag(tp, 5750_PLUS) ||
8251 tg3_flag(tp, 5780_CLASS) ||
8252 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
8253 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8254 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
8255 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8256 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8257 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
8259 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
8261 nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
8262 host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
8264 val = min(nic_rep_thresh, host_rep_thresh);
8265 tw32(RCVBDI_STD_THRESH, val);
8267 if (tg3_flag(tp, 57765_PLUS))
8268 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
/* Jumbo thresholds only apply to jumbo-capable, non-5780-class chips. */
8270 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8273 if (!tg3_flag(tp, 5705_PLUS))
8274 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
8276 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5717;
8278 host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
8280 val = min(bdcache_maxcnt / 2, host_rep_thresh);
8281 tw32(RCVBDI_JUMBO_THRESH, val);
8283 if (tg3_flag(tp, 57765_PLUS))
8284 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
/*
 * tg3_lso_rd_dma_workaround_bit - select the per-chip read-DMA TX-length
 * workaround bit for the 5719/5720 LSO read-DMA erratum: the 5719 uses
 * its own bit, every other caller (in practice the 5720) gets the 5720
 * bit.  Used when setting TG3_LSO_RD_DMA_CRPTEN_CTRL.
 */
8287 static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
8289 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
8290 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
8292 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
8295 /* tp->lock is held. */
8296 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8298 u32 val, rdmac_mode;
8300 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
8302 tg3_disable_ints(tp);
8306 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
8308 if (tg3_flag(tp, INIT_COMPLETE))
8309 tg3_abort_hw(tp, 1);
8311 /* Enable MAC control of LPI */
8312 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
8313 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
8314 TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
8315 TG3_CPMU_EEE_LNKIDL_UART_IDL);
8317 tw32_f(TG3_CPMU_EEE_CTRL,
8318 TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
8320 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
8321 TG3_CPMU_EEEMD_LPI_IN_TX |
8322 TG3_CPMU_EEEMD_LPI_IN_RX |
8323 TG3_CPMU_EEEMD_EEE_ENABLE;
8325 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8326 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
8328 if (tg3_flag(tp, ENABLE_APE))
8329 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
8331 tw32_f(TG3_CPMU_EEE_MODE, val);
8333 tw32_f(TG3_CPMU_EEE_DBTMR1,
8334 TG3_CPMU_DBTMR1_PCIEXIT_2047US |
8335 TG3_CPMU_DBTMR1_LNKIDLE_2047US);
8337 tw32_f(TG3_CPMU_EEE_DBTMR2,
8338 TG3_CPMU_DBTMR2_APE_TX_2047US |
8339 TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
8345 err = tg3_chip_reset(tp);
8349 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
8351 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
8352 val = tr32(TG3_CPMU_CTRL);
8353 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
8354 tw32(TG3_CPMU_CTRL, val);
8356 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8357 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8358 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8359 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8361 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
8362 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
8363 val |= CPMU_LNK_AWARE_MACCLK_6_25;
8364 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
8366 val = tr32(TG3_CPMU_HST_ACC);
8367 val &= ~CPMU_HST_ACC_MACCLK_MASK;
8368 val |= CPMU_HST_ACC_MACCLK_6_25;
8369 tw32(TG3_CPMU_HST_ACC, val);
8372 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
8373 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
8374 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
8375 PCIE_PWR_MGMT_L1_THRESH_4MS;
8376 tw32(PCIE_PWR_MGMT_THRESH, val);
8378 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
8379 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
8381 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
8383 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8384 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8387 if (tg3_flag(tp, L1PLLPD_EN)) {
8388 u32 grc_mode = tr32(GRC_MODE);
8390 /* Access the lower 1K of PL PCIE block registers. */
8391 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8392 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8394 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
8395 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
8396 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
8398 tw32(GRC_MODE, grc_mode);
8401 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
8402 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
8403 u32 grc_mode = tr32(GRC_MODE);
8405 /* Access the lower 1K of PL PCIE block registers. */
8406 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8407 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8409 val = tr32(TG3_PCIE_TLDLPL_PORT +
8410 TG3_PCIE_PL_LO_PHYCTL5);
8411 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
8412 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
8414 tw32(GRC_MODE, grc_mode);
8417 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
8418 u32 grc_mode = tr32(GRC_MODE);
8420 /* Access the lower 1K of DL PCIE block registers. */
8421 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8422 tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
8424 val = tr32(TG3_PCIE_TLDLPL_PORT +
8425 TG3_PCIE_DL_LO_FTSMAX);
8426 val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
8427 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
8428 val | TG3_PCIE_DL_LO_FTSMAX_VAL);
8430 tw32(GRC_MODE, grc_mode);
8433 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8434 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8435 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8436 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8439 /* This works around an issue with Athlon chipsets on
8440 * B3 tigon3 silicon. This bit has no effect on any
8441 * other revision. But do not set this on PCI Express
8442 * chips and don't even touch the clocks if the CPMU is present.
8444 if (!tg3_flag(tp, CPMU_PRESENT)) {
8445 if (!tg3_flag(tp, PCI_EXPRESS))
8446 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
8447 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8450 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
8451 tg3_flag(tp, PCIX_MODE)) {
8452 val = tr32(TG3PCI_PCISTATE);
8453 val |= PCISTATE_RETRY_SAME_DMA;
8454 tw32(TG3PCI_PCISTATE, val);
8457 if (tg3_flag(tp, ENABLE_APE)) {
8458 /* Allow reads and writes to the
8459 * APE register and memory space.
8461 val = tr32(TG3PCI_PCISTATE);
8462 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8463 PCISTATE_ALLOW_APE_SHMEM_WR |
8464 PCISTATE_ALLOW_APE_PSPACE_WR;
8465 tw32(TG3PCI_PCISTATE, val);
8468 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
8469 /* Enable some hw fixes. */
8470 val = tr32(TG3PCI_MSI_DATA);
8471 val |= (1 << 26) | (1 << 28) | (1 << 29);
8472 tw32(TG3PCI_MSI_DATA, val);
8475 /* Descriptor ring init may make accesses to the
8476 * NIC SRAM area to setup the TX descriptors, so we
8477 * can only do this after the hardware has been
8478 * successfully reset.
8480 err = tg3_init_rings(tp);
8484 if (tg3_flag(tp, 57765_PLUS)) {
8485 val = tr32(TG3PCI_DMA_RW_CTRL) &
8486 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
8487 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
8488 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
8489 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765 &&
8490 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8491 val |= DMA_RWCTRL_TAGGED_STAT_WA;
8492 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
8493 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
8494 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
8495 /* This value is determined during the probe time DMA
8496 * engine test, tg3_test_dma.
8498 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8501 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
8502 GRC_MODE_4X_NIC_SEND_RINGS |
8503 GRC_MODE_NO_TX_PHDR_CSUM |
8504 GRC_MODE_NO_RX_PHDR_CSUM);
8505 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
8507 /* Pseudo-header checksum is done by hardware logic and not
8508 * the offload processers, so make the chip do the pseudo-
8509 * header checksums on receive. For transmit it is more
8510 * convenient to do the pseudo-header checksum in software
8511 * as Linux does that on transmit for us in all cases.
8513 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
8517 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
8519 /* Setup the timer prescalar register. Clock is always 66Mhz. */
8520 val = tr32(GRC_MISC_CFG);
8522 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
8523 tw32(GRC_MISC_CFG, val);
8525 /* Initialize MBUF/DESC pool. */
8526 if (tg3_flag(tp, 5750_PLUS)) {
8528 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
8529 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
8530 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8531 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
8533 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
8534 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
8535 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
8536 } else if (tg3_flag(tp, TSO_CAPABLE)) {
8539 fw_len = tp->fw_len;
8540 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
8541 tw32(BUFMGR_MB_POOL_ADDR,
8542 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
8543 tw32(BUFMGR_MB_POOL_SIZE,
8544 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
8547 if (tp->dev->mtu <= ETH_DATA_LEN) {
8548 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8549 tp->bufmgr_config.mbuf_read_dma_low_water);
8550 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8551 tp->bufmgr_config.mbuf_mac_rx_low_water);
8552 tw32(BUFMGR_MB_HIGH_WATER,
8553 tp->bufmgr_config.mbuf_high_water);
8555 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8556 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
8557 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8558 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
8559 tw32(BUFMGR_MB_HIGH_WATER,
8560 tp->bufmgr_config.mbuf_high_water_jumbo);
8562 tw32(BUFMGR_DMA_LOW_WATER,
8563 tp->bufmgr_config.dma_low_water);
8564 tw32(BUFMGR_DMA_HIGH_WATER,
8565 tp->bufmgr_config.dma_high_water);
8567 val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
8568 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
8569 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
8570 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8571 tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8572 tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
8573 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
8574 tw32(BUFMGR_MODE, val);
8575 for (i = 0; i < 2000; i++) {
8576 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
8581 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
8585 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
8586 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
8588 tg3_setup_rxbd_thresholds(tp);
8590 /* Initialize TG3_BDINFO's at:
8591 * RCVDBDI_STD_BD: standard eth size rx ring
8592 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
8593 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
8596 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
8597 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
8598 * ring attribute flags
8599 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
8601 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
8602 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
8604 * The size of each ring is fixed in the firmware, but the location is
8607 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8608 ((u64) tpr->rx_std_mapping >> 32));
8609 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8610 ((u64) tpr->rx_std_mapping & 0xffffffff));
8611 if (!tg3_flag(tp, 5717_PLUS))
8612 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
8613 NIC_SRAM_RX_BUFFER_DESC);
8615 /* Disable the mini ring */
8616 if (!tg3_flag(tp, 5705_PLUS))
8617 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
8618 BDINFO_FLAGS_DISABLED);
8620 /* Program the jumbo buffer descriptor ring control
8621 * blocks on those devices that have them.
8623 if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8624 (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
8626 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
8627 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8628 ((u64) tpr->rx_jmb_mapping >> 32));
8629 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8630 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
8631 val = TG3_RX_JMB_RING_SIZE(tp) <<
8632 BDINFO_FLAGS_MAXLEN_SHIFT;
8633 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8634 val | BDINFO_FLAGS_USE_EXT_RECV);
8635 if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
8636 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8637 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
8638 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
8640 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8641 BDINFO_FLAGS_DISABLED);
8644 if (tg3_flag(tp, 57765_PLUS)) {
8645 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8646 val = TG3_RX_STD_MAX_SIZE_5700;
8648 val = TG3_RX_STD_MAX_SIZE_5717;
8649 val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
8650 val |= (TG3_RX_STD_DMA_SZ << 2);
8652 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
8654 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
8656 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
8658 tpr->rx_std_prod_idx = tp->rx_pending;
8659 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
8661 tpr->rx_jmb_prod_idx =
8662 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
8663 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
8665 tg3_rings_reset(tp);
8667 /* Initialize MAC address and backoff seed. */
8668 __tg3_set_mac_addr(tp, 0);
8670 /* MTU + ethernet header + FCS + optional VLAN tag */
8671 tw32(MAC_RX_MTU_SIZE,
8672 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
8674 /* The slot time is changed by tg3_setup_phy if we
8675 * run at gigabit with half duplex.
8677 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
8678 (6 << TX_LENGTHS_IPG_SHIFT) |
8679 (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
8681 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8682 val |= tr32(MAC_TX_LENGTHS) &
8683 (TX_LENGTHS_JMB_FRM_LEN_MSK |
8684 TX_LENGTHS_CNT_DWN_VAL_MSK);
8686 tw32(MAC_TX_LENGTHS, val);
8688 /* Receive rules. */
8689 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
8690 tw32(RCVLPC_CONFIG, 0x0181);
8692 /* Calculate RDMAC_MODE setting early, we need it to determine
8693 * the RCVLPC_STATE_ENABLE mask.
8695 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
8696 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
8697 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
8698 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
8699 RDMAC_MODE_LNGREAD_ENAB);
8701 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
8702 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
8704 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8705 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8706 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8707 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
8708 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
8709 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
8711 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8712 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8713 if (tg3_flag(tp, TSO_CAPABLE) &&
8714 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
8715 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
8716 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8717 !tg3_flag(tp, IS_5788)) {
8718 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8722 if (tg3_flag(tp, PCI_EXPRESS))
8723 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8725 if (tg3_flag(tp, HW_TSO_1) ||
8726 tg3_flag(tp, HW_TSO_2) ||
8727 tg3_flag(tp, HW_TSO_3))
8728 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
8730 if (tg3_flag(tp, 57765_PLUS) ||
8731 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8732 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8733 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
8735 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8736 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
8738 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
8739 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8740 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8741 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
8742 tg3_flag(tp, 57765_PLUS)) {
8743 val = tr32(TG3_RDMA_RSRVCTRL_REG);
8744 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8745 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8746 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
8747 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
8748 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
8749 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
8750 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
8751 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
8753 tw32(TG3_RDMA_RSRVCTRL_REG,
8754 val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
8757 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8758 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8759 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
8760 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
8761 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
8762 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
8765 /* Receive/send statistics. */
8766 if (tg3_flag(tp, 5750_PLUS)) {
8767 val = tr32(RCVLPC_STATS_ENABLE);
8768 val &= ~RCVLPC_STATSENAB_DACK_FIX;
8769 tw32(RCVLPC_STATS_ENABLE, val);
8770 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
8771 tg3_flag(tp, TSO_CAPABLE)) {
8772 val = tr32(RCVLPC_STATS_ENABLE);
8773 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
8774 tw32(RCVLPC_STATS_ENABLE, val);
8776 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
8778 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
8779 tw32(SNDDATAI_STATSENAB, 0xffffff);
8780 tw32(SNDDATAI_STATSCTRL,
8781 (SNDDATAI_SCTRL_ENABLE |
8782 SNDDATAI_SCTRL_FASTUPD));
8784 /* Setup host coalescing engine. */
8785 tw32(HOSTCC_MODE, 0);
8786 for (i = 0; i < 2000; i++) {
8787 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
8792 __tg3_set_coalesce(tp, &tp->coal);
8794 if (!tg3_flag(tp, 5705_PLUS)) {
8795 /* Status/statistics block address. See tg3_timer,
8796 * the tg3_periodic_fetch_stats call there, and
8797 * tg3_get_stats to see how this works for 5705/5750 chips.
8799 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8800 ((u64) tp->stats_mapping >> 32));
8801 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8802 ((u64) tp->stats_mapping & 0xffffffff));
8803 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
8805 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
8807 /* Clear statistics and status block memory areas */
8808 for (i = NIC_SRAM_STATS_BLK;
8809 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
8811 tg3_write_mem(tp, i, 0);
8816 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
8818 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
8819 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
8820 if (!tg3_flag(tp, 5705_PLUS))
8821 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
8823 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8824 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
8825 /* reset to prevent losing 1st rx packet intermittently */
8826 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8830 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
8831 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
8832 MAC_MODE_FHDE_ENABLE;
8833 if (tg3_flag(tp, ENABLE_APE))
8834 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
8835 if (!tg3_flag(tp, 5705_PLUS) &&
8836 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
8837 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
8838 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8839 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
8842 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
8843 * If TG3_FLAG_IS_NIC is zero, we should read the
8844 * register to preserve the GPIO settings for LOMs. The GPIOs,
8845 * whether used as inputs or outputs, are set by boot code after
8848 if (!tg3_flag(tp, IS_NIC)) {
8851 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
8852 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
8853 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
8855 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8856 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
8857 GRC_LCLCTRL_GPIO_OUTPUT3;
8859 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
8860 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
8862 tp->grc_local_ctrl &= ~gpio_mask;
8863 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
8865 /* GPIO1 must be driven high for eeprom write protect */
8866 if (tg3_flag(tp, EEPROM_WRITE_PROT))
8867 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
8868 GRC_LCLCTRL_GPIO_OUTPUT1);
8870 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8873 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1) {
8874 val = tr32(MSGINT_MODE);
8875 val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE;
8876 if (!tg3_flag(tp, 1SHOT_MSI))
8877 val |= MSGINT_MODE_ONE_SHOT_DISABLE;
8878 tw32(MSGINT_MODE, val);
8881 if (!tg3_flag(tp, 5705_PLUS)) {
8882 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
8886 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
8887 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
8888 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
8889 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
8890 WDMAC_MODE_LNGREAD_ENAB);
8892 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8893 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8894 if (tg3_flag(tp, TSO_CAPABLE) &&
8895 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
8896 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
8898 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8899 !tg3_flag(tp, IS_5788)) {
8900 val |= WDMAC_MODE_RX_ACCEL;
8904 /* Enable host coalescing bug fix */
8905 if (tg3_flag(tp, 5755_PLUS))
8906 val |= WDMAC_MODE_STATUS_TAG_FIX;
8908 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
8909 val |= WDMAC_MODE_BURST_ALL_DATA;
8911 tw32_f(WDMAC_MODE, val);
8914 if (tg3_flag(tp, PCIX_MODE)) {
8917 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8919 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
8920 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
8921 pcix_cmd |= PCI_X_CMD_READ_2K;
8922 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
8923 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
8924 pcix_cmd |= PCI_X_CMD_READ_2K;
8926 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8930 tw32_f(RDMAC_MODE, rdmac_mode);
8933 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8934 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8935 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
8936 if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
8939 if (i < TG3_NUM_RDMA_CHANNELS) {
8940 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
8941 val |= tg3_lso_rd_dma_workaround_bit(tp);
8942 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
8943 tg3_flag_set(tp, 5719_5720_RDMA_BUG);
8947 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
8948 if (!tg3_flag(tp, 5705_PLUS))
8949 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
8951 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
8953 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
8955 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
8957 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
8958 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
8959 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
8960 if (tg3_flag(tp, LRG_PROD_RING_CAP))
8961 val |= RCVDBDI_MODE_LRG_RING_SZ;
8962 tw32(RCVDBDI_MODE, val);
8963 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
8964 if (tg3_flag(tp, HW_TSO_1) ||
8965 tg3_flag(tp, HW_TSO_2) ||
8966 tg3_flag(tp, HW_TSO_3))
8967 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
8968 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
8969 if (tg3_flag(tp, ENABLE_TSS))
8970 val |= SNDBDI_MODE_MULTI_TXQ_EN;
8971 tw32(SNDBDI_MODE, val);
8972 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
8974 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
8975 err = tg3_load_5701_a0_firmware_fix(tp);
8980 if (tg3_flag(tp, TSO_CAPABLE)) {
8981 err = tg3_load_tso_firmware(tp);
8986 tp->tx_mode = TX_MODE_ENABLE;
8988 if (tg3_flag(tp, 5755_PLUS) ||
8989 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
8990 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
8992 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8993 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
8994 tp->tx_mode &= ~val;
8995 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
8998 tw32_f(MAC_TX_MODE, tp->tx_mode);
9001 if (tg3_flag(tp, ENABLE_RSS)) {
9003 u32 reg = MAC_RSS_INDIR_TBL_0;
9005 if (tp->irq_cnt == 2) {
9006 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i += 8) {
9013 while (i < TG3_RSS_INDIR_TBL_SIZE) {
9014 val = i % (tp->irq_cnt - 1);
9016 for (; i % 8; i++) {
9018 val |= (i % (tp->irq_cnt - 1));
9025 /* Setup the "secret" hash key. */
9026 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
9027 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
9028 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
9029 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
9030 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
9031 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
9032 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
9033 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
9034 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
9035 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
9038 tp->rx_mode = RX_MODE_ENABLE;
9039 if (tg3_flag(tp, 5755_PLUS))
9040 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
9042 if (tg3_flag(tp, ENABLE_RSS))
9043 tp->rx_mode |= RX_MODE_RSS_ENABLE |
9044 RX_MODE_RSS_ITBL_HASH_BITS_7 |
9045 RX_MODE_RSS_IPV6_HASH_EN |
9046 RX_MODE_RSS_TCP_IPV6_HASH_EN |
9047 RX_MODE_RSS_IPV4_HASH_EN |
9048 RX_MODE_RSS_TCP_IPV4_HASH_EN;
9050 tw32_f(MAC_RX_MODE, tp->rx_mode);
9053 tw32(MAC_LED_CTRL, tp->led_ctrl);
9055 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
9056 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9057 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9060 tw32_f(MAC_RX_MODE, tp->rx_mode);
9063 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9064 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
9065 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
9066 /* Set drive transmission level to 1.2V */
9067 /* only if the signal pre-emphasis bit is not set */
9068 val = tr32(MAC_SERDES_CFG);
9071 tw32(MAC_SERDES_CFG, val);
9073 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
9074 tw32(MAC_SERDES_CFG, 0x616000);
9077 /* Prevent chip from dropping frames when flow control
9080 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
9084 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
9086 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
9087 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
9088 /* Use hardware link auto-negotiation */
9089 tg3_flag_set(tp, HW_AUTONEG);
9092 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9093 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
9096 tmp = tr32(SERDES_RX_CTRL);
9097 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
9098 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
9099 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
9100 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9103 if (!tg3_flag(tp, USE_PHYLIB)) {
9104 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
9105 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
9106 tp->link_config.speed = tp->link_config.orig_speed;
9107 tp->link_config.duplex = tp->link_config.orig_duplex;
9108 tp->link_config.autoneg = tp->link_config.orig_autoneg;
9111 err = tg3_setup_phy(tp, 0);
9115 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9116 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
9119 /* Clear CRC stats. */
9120 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
9121 tg3_writephy(tp, MII_TG3_TEST1,
9122 tmp | MII_TG3_TEST1_CRC_EN);
9123 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
9128 __tg3_set_rx_mode(tp->dev);
9130 /* Initialize receive rules. */
9131 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
9132 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
9133 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
9134 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
9136 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
9140 if (tg3_flag(tp, ENABLE_ASF))
9144 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
9146 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
9148 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
9150 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
9152 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
9154 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
9156 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
9158 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
9160 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
9162 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
9164 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
9166 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
9168 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
9170 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
9178 if (tg3_flag(tp, ENABLE_APE))
9179 /* Write our heartbeat update interval to APE. */
9180 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
9181 APE_HOST_HEARTBEAT_INT_DISABLE);
9183 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
9188 /* Called at device open time to get the chip ready for
9189  * packet processing. Invoked with tp->lock held.
9191 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
9193 	/* Chip may have been just powered on. If so, the boot code may still
9194 	 * be running initialization. Wait for it to finish to avoid races in
9195 	 * accessing the hardware.
9197 	tg3_enable_register_access(tp);
	/* Ensure the core clocks are switched on before touching registers. */
9200 	tg3_switch_clocks(tp);
	/* Point the PCI memory window back at the start of register space. */
9202 	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	/* Full hardware (re)initialization; reset_phy also resets the PHY. */
9204 	return tg3_reset_hw(tp, reset_phy);
/* Fold the 32-bit hardware counter at REG into the 64-bit software
 * accumulator PSTAT, carrying into .high when the .low half wraps.
 * (No comments inside the macro body: they would break the \-continuation.)
 */
9207 #define TG3_STAT_ADD32(PSTAT, REG) \
9208 do {	u32 __val = tr32(REG); \
9209 	(PSTAT)->low += __val; \
9210 	if ((PSTAT)->low < __val) \
9211 		(PSTAT)->high += 1; \
/* Periodically harvest the chip's 32-bit MAC statistics registers into the
 * driver's 64-bit counters.  Called from the driver timer (tg3_timer) when
 * the link is up; skipped entirely while the carrier is down.
 */
9214 static void tg3_periodic_fetch_stats(struct tg3 *tp)
9216 	struct tg3_hw_stats *sp = tp->hw_stats;
9218 	if (!netif_carrier_ok(tp->dev))
9221 	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
9222 	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
9223 	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
9224 	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
9225 	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
9226 	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
9227 	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
9228 	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
9229 	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
9230 	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
9231 	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
9232 	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
9233 	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
	/* 5719/5720 RDMA erratum workaround: once enough frames have been
	 * sent, clear the workaround bit that tg3_reset_hw set earlier.
	 */
9234 	if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) &&
9235 	    (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
9236 	    sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
9239 		val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
9240 		val &= ~tg3_lso_rd_dma_workaround_bit(tp);
9241 		tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
9242 		tg3_flag_clear(tp, 5719_5720_RDMA_BUG);
9245 	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
9246 	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
9247 	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
9248 	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
9249 	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
9250 	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
9251 	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
9252 	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
9253 	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
9254 	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
9255 	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
9256 	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
9257 	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
9258 	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
9260 	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
	/* Chips other than 5717/5719-A0/5720-A0 have a usable in-discards
	 * counter; the excluded revisions fall through to the manual
	 * HOSTCC_FLOW_ATTN based accounting below.
	 */
9261 	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9262 	    tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
9263 	    tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
9264 		TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
9266 		u32 val = tr32(HOSTCC_FLOW_ATTN);
9267 		val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
9269 			tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
9270 		sp->rx_discards.low += val;
9271 		if (sp->rx_discards.low < val)
9272 			sp->rx_discards.high += 1;
	/* mbuf low-watermark hits are reported as discards on these chips. */
9274 	sp->mbuf_lwm_thresh_hit = sp->rx_discards;
9276 	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
/* Detect a missed (lost) one-shot MSI: if a NAPI vector has pending work but
 * its rx/tx consumer indices have not advanced since the previous timer tick,
 * assume the interrupt was dropped and (in code elided here) kick the vector
 * by hand.  Called from tg3_timer on 5717/57765-class chips.
 */
9279 static void tg3_chk_missed_msi(struct tg3 *tp)
9283 	for (i = 0; i < tp->irq_cnt; i++) {
9284 		struct tg3_napi *tnapi = &tp->napi[i];
9286 		if (tg3_has_work(tnapi)) {
			/* Work pending and no progress since last tick? */
9287 			if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
9288 			    tnapi->last_tx_cons == tnapi->tx_cons) {
9289 				if (tnapi->chk_msi_cnt < 1) {
9290 					tnapi->chk_msi_cnt++;
		/* Progress was made (or recovery ran): rearm the detector. */
9296 		tnapi->chk_msi_cnt = 0;
9297 		tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
9298 		tnapi->last_tx_cons = tnapi->tx_cons;
/* Driver watchdog timer.  Runs several independent duties on each tick:
 * missed-MSI detection (5717/57765), non-tagged-status interrupt race
 * mitigation, once-per-second statistics/link polling, and a 2-second ASF
 * heartbeat to the management firmware.  Re-arms itself at the end.
 */
9302 static void tg3_timer(unsigned long __opaque)
9304 	struct tg3 *tp = (struct tg3 *) __opaque;
	/* Bail out while an interrupt sync or a reset task is in flight. */
9306 	if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
9309 	spin_lock(&tp->lock);
9311 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
9312 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
9313 		tg3_chk_missed_msi(tp);
9315 	if (!tg3_flag(tp, TAGGED_STATUS)) {
9316 		/* All of this garbage is because when using non-tagged
9317 		 * IRQ status the mailbox/status_block protocol the chip
9318 		 * uses with the cpu is race prone.
9320 		if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
9321 			tw32(GRC_LOCAL_CTRL,
9322 			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
9324 			tw32(HOSTCC_MODE, tp->coalesce_mode |
9325 			     HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
		/* Write DMA engine died: schedule a full chip reset. */
9328 		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
9329 			spin_unlock(&tp->lock);
9330 			tg3_reset_task_schedule(tp);
9335 	/* This part only runs once per second. */
9336 	if (!--tp->timer_counter) {
9337 		if (tg3_flag(tp, 5705_PLUS))
9338 			tg3_periodic_fetch_stats(tp);
9340 		if (tp->setlpicnt && !--tp->setlpicnt)
9341 			tg3_phy_eee_enable(tp);
		/* Link supervision: pick whichever polling strategy the
		 * hardware flags call for.
		 */
9343 		if (tg3_flag(tp, USE_LINKCHG_REG)) {
9347 			mac_stat = tr32(MAC_STATUS);
9350 			if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
9351 				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
9353 			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
9357 				tg3_setup_phy(tp, 0);
9358 		} else if (tg3_flag(tp, POLL_SERDES)) {
9359 			u32 mac_stat = tr32(MAC_STATUS);
9362 			if (netif_carrier_ok(tp->dev) &&
9363 			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
9366 			if (!netif_carrier_ok(tp->dev) &&
9367 			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
9368 					 MAC_STATUS_SIGNAL_DET))) {
9372 				if (!tp->serdes_counter) {
9375 					       ~MAC_MODE_PORT_MODE_MASK));
9377 					tw32_f(MAC_MODE, tp->mac_mode);
9380 				tg3_setup_phy(tp, 0);
9382 		} else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9383 			   tg3_flag(tp, 5780_CLASS)) {
9384 			tg3_serdes_parallel_detect(tp);
9387 		tp->timer_counter = tp->timer_multiplier;
9390 	/* Heartbeat is only sent once every 2 seconds.
9392 	 * The heartbeat is to tell the ASF firmware that the host
9393 	 * driver is still alive. In the event that the OS crashes,
9394 	 * ASF needs to reset the hardware to free up the FIFO space
9395 	 * that may be filled with rx packets destined for the host.
9396 	 * If the FIFO is full, ASF will no longer function properly.
9398 	 * Unintended resets have been reported on real time kernels
9399 	 * where the timer doesn't run on time. Netpoll will also have
9402 	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
9403 	 * to check the ring condition when the heartbeat is expiring
9404 	 * before doing the reset. This will prevent most unintended
9407 	if (!--tp->asf_counter) {
9408 		if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
9409 			tg3_wait_for_event_ack(tp);
9411 			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
9412 				      FWCMD_NICDRV_ALIVE3);
9413 			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
9414 			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
9415 				      TG3_FW_UPDATE_TIMEOUT_SEC);
9417 			tg3_generate_fw_event(tp);
9419 		tp->asf_counter = tp->asf_multiplier;
9422 	spin_unlock(&tp->lock);
	/* Re-arm the timer for the next tick. */
9425 	tp->timer.expires = jiffies + tp->timer_offset;
9426 	add_timer(&tp->timer);
/* Request the IRQ for one NAPI vector.  Picks the handler variant based on
 * MSI/MSI-X vs legacy INTx and tagged-status capability, and builds a
 * per-vector name ("ethX-N") when multiple vectors are in use.
 */
9429 static int tg3_request_irq(struct tg3 *tp, int irq_num)
9432 	unsigned long flags;
9434 	struct tg3_napi *tnapi = &tp->napi[irq_num];
9436 	if (tp->irq_cnt == 1)
9437 		name = tp->dev->name;
9439 		name = &tnapi->irq_lbl[0];
9440 		snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
		/* Defensive: snprintf already terminates, but be explicit. */
9441 		name[IFNAMSIZ-1] = 0;
9444 	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9446 		if (tg3_flag(tp, 1SHOT_MSI))
		/* Legacy INTx: shared line, tagged handler where supported. */
9451 		if (tg3_flag(tp, TAGGED_STATUS))
9452 			fn = tg3_interrupt_tagged;
9453 		flags = IRQF_SHARED;
9456 	return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
/* Verify that the chip can actually deliver an interrupt: temporarily swap
 * in a test ISR, force a coalescing-now event, and poll for evidence that
 * the interrupt arrived.  Restores the normal handler before returning.
 * Returns 0 on success (interrupt observed), negative errno otherwise.
 */
9459 static int tg3_test_interrupt(struct tg3 *tp)
9461 	struct tg3_napi *tnapi = &tp->napi[0];
9462 	struct net_device *dev = tp->dev;
9463 	int err, i, intr_ok = 0;
9466 	if (!netif_running(dev))
9469 	tg3_disable_ints(tp);
9471 	free_irq(tnapi->irq_vec, tnapi);
9474 	 * Turn off MSI one shot mode.  Otherwise this test has no
9475 	 * observable way to know whether the interrupt was delivered.
9477 	if (tg3_flag(tp, 57765_PLUS)) {
9478 		val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
9479 		tw32(MSGINT_MODE, val);
	/* NOTE(review): IRQF_SAMPLE_RANDOM was removed from mainline kernels
	 * (v3.6+); this flag would need dropping on a forward-port — confirm
	 * against the target kernel version.
	 */
9482 	err = request_irq(tnapi->irq_vec, tg3_test_isr,
9483 			  IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, tnapi);
9487 	tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
9488 	tg3_enable_ints(tp);
	/* Force an immediate coalescing event so an interrupt should fire. */
9490 	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
9493 	for (i = 0; i < 5; i++) {
9494 		u32 int_mbox, misc_host_ctrl;
9496 		int_mbox = tr32_mailbox(tnapi->int_mbox);
9497 		misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
		/* Either a mailbox update or a masked PCI INT means delivery. */
9499 		if ((int_mbox != 0) ||
9500 		    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
9505 		if (tg3_flag(tp, 57765_PLUS) &&
9506 		    tnapi->hw_status->status_tag != tnapi->last_tag)
9507 			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
9512 	tg3_disable_ints(tp);
9514 	free_irq(tnapi->irq_vec, tnapi);
	/* Reinstall the regular production handler. */
9516 	err = tg3_request_irq(tp, 0);
9522 	/* Reenable MSI one shot mode. */
9523 	if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
9524 		val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
9525 		tw32(MSGINT_MODE, val);
9533 /* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
9534  * successfully restored
9536 static int tg3_test_msi(struct tg3 *tp)
9541 	if (!tg3_flag(tp, USING_MSI))
9544 	/* Turn off SERR reporting in case MSI terminates with Master
9547 	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9548 	pci_write_config_word(tp->pdev, PCI_COMMAND,
9549 			      pci_cmd & ~PCI_COMMAND_SERR);
9551 	err = tg3_test_interrupt(tp);
	/* Restore the original PCI command word (SERR bit included). */
9553 	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9558 	/* other failures */
9562 	/* MSI test failed, go back to INTx mode */
9563 	netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
9564 		    "to INTx mode. Please report this failure to the PCI "
9565 		    "maintainer and include system chipset information\n");
9567 	free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9569 	pci_disable_msi(tp->pdev);
9571 	tg3_flag_clear(tp, USING_MSI);
	/* Fall back to the legacy PCI line interrupt. */
9572 	tp->napi[0].irq_vec = tp->pdev->irq;
9574 	err = tg3_request_irq(tp, 0);
9578 	/* Need to reset the chip because the MSI cycle may have terminated
9579 	 * with Master Abort.
9581 	tg3_full_lock(tp, 1);
9583 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9584 	err = tg3_init_hw(tp, 1);
9586 	tg3_full_unlock(tp);
	/* Re-init failed too: release the IRQ we just acquired. */
9589 		free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
/* Load the TSO/5701-A0 firmware blob named in tp->fw_needed and sanity-check
 * its header.  On success tp->fw holds the blob and tp->fw_needed is cleared;
 * the caller owns releasing tp->fw later.
 */
9594 static int tg3_request_firmware(struct tg3 *tp)
9596 	const __be32 *fw_data;
9598 	if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
9599 		netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
9604 	fw_data = (void *)tp->fw->data;
9606 	/* Firmware blob starts with version numbers, followed by
9607 	 * start address and _full_ length including BSS sections
9608 	 * (which must be longer than the actual data, of course
9611 	tp->fw_len = be32_to_cpu(fw_data[2]);	/* includes bss */
	/* The declared length must be at least the payload (blob minus the
	 * 12-byte header); anything shorter means a corrupt image.
	 */
9612 	if (tp->fw_len < (tp->fw->size - 12)) {
9613 		netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
9614 			   tp->fw_len, tp->fw_needed);
9615 		release_firmware(tp->fw);
9620 	/* We no longer need firmware; we have it. */
9621 	tp->fw_needed = NULL;
/* Try to bring up MSI-X with one vector per online CPU plus one for link
 * events.  Falls back (returns false) so the caller can try plain MSI.
 * On success may also enable RSS and, on 5719/5720, TSS.
 */
9625 static bool tg3_enable_msix(struct tg3 *tp)
9627 	int i, rc, cpus = num_online_cpus();
	/* NOTE(review): variable-length array on the kernel stack; bounded by
	 * tp->irq_max but VLAs are discouraged in kernel code — confirm
	 * irq_max is small (it is a compile-time-limited field upstream).
	 */
9628 	struct msix_entry msix_ent[tp->irq_max];
9631 	/* Just fallback to the simpler MSI mode. */
9635 	 * We want as many rx rings enabled as there are cpus.
9636 	 * The first MSIX vector only deals with link interrupts, etc,
9637 	 * so we add one to the number of vectors we are requesting.
9639 	tp->irq_cnt = min_t(unsigned, cpus + 1, tp->irq_max);
9641 	for (i = 0; i < tp->irq_max; i++) {
9642 		msix_ent[i].entry  = i;
9643 		msix_ent[i].vector = 0;
9646 	rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
9649 	} else if (rc != 0) {
		/* Positive rc = number of vectors available; retry with that. */
9650 		if (pci_enable_msix(tp->pdev, msix_ent, rc))
9652 		netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
9657 	for (i = 0; i < tp->irq_max; i++)
9658 		tp->napi[i].irq_vec = msix_ent[i].vector;
9660 	netif_set_real_num_tx_queues(tp->dev, 1);
	/* Vector 0 is link-only, so rx queue count is irq_cnt - 1. */
9661 	rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1;
9662 	if (netif_set_real_num_rx_queues(tp->dev, rc)) {
9663 		pci_disable_msix(tp->pdev);
9667 	if (tp->irq_cnt > 1) {
9668 		tg3_flag_set(tp, ENABLE_RSS);
9670 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
9671 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9672 			tg3_flag_set(tp, ENABLE_TSS);
9673 			netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
/* Select and program the interrupt delivery mode: MSI-X if possible, then
 * MSI, else legacy INTx.  Also configures the MSGINT_MODE register and the
 * tx/rx queue counts for the single-vector case.
 */
9680 static void tg3_ints_init(struct tg3 *tp)
9682 	if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
9683 	    !tg3_flag(tp, TAGGED_STATUS)) {
9684 		/* All MSI supporting chips should support tagged
9685 		 * status.  Assert that this is the case.
9687 		netdev_warn(tp->dev,
9688 			    "MSI without TAGGED_STATUS? Not using MSI\n");
9692 	if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
9693 		tg3_flag_set(tp, USING_MSIX);
9694 	else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
9695 		tg3_flag_set(tp, USING_MSI);
9697 	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9698 		u32 msi_mode = tr32(MSGINT_MODE);
9699 		if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
9700 			msi_mode |= MSGINT_MODE_MULTIVEC_EN;
		/* One-shot MSI disabled unless the chip supports it. */
9701 		if (!tg3_flag(tp, 1SHOT_MSI))
9702 			msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
9703 		tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
	/* Single-vector (MSI or INTx): one tx and one rx queue. */
9706 	if (!tg3_flag(tp, USING_MSIX)) {
9708 		tp->napi[0].irq_vec = tp->pdev->irq;
9709 		netif_set_real_num_tx_queues(tp->dev, 1);
9710 		netif_set_real_num_rx_queues(tp->dev, 1);
/* Undo tg3_ints_init(): disable whichever of MSI-X/MSI is active and clear
 * all interrupt-mode and multiqueue flags so a later re-init starts clean.
 */
9714 static void tg3_ints_fini(struct tg3 *tp)
9716 	if (tg3_flag(tp, USING_MSIX))
9717 		pci_disable_msix(tp->pdev);
9718 	else if (tg3_flag(tp, USING_MSI))
9719 		pci_disable_msi(tp->pdev);
	/* Clear unconditionally; harmless if a flag was never set. */
9720 	tg3_flag_clear(tp, USING_MSI);
9721 	tg3_flag_clear(tp, USING_MSIX);
9722 	tg3_flag_clear(tp, ENABLE_RSS);
9723 	tg3_flag_clear(tp, ENABLE_TSS);
/* ndo_open: bring the interface up.  Loads firmware if needed, powers the
 * chip, sets up interrupts/NAPI/DMA memory, initializes the hardware, starts
 * the watchdog timer and tx queues.  The trailing labels (elided between the
 * visible lines) unwind in reverse order on failure.
 */
9726 static int tg3_open(struct net_device *dev)
9728 	struct tg3 *tp = netdev_priv(dev);
9731 	if (tp->fw_needed) {
9732 		err = tg3_request_firmware(tp);
		/* 5701 A0 needs its firmware fix; for other chips a missing
		 * TSO firmware just disables the TSO capability.
		 */
9733 		if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9737 				netdev_warn(tp->dev, "TSO capability disabled\n");
9738 				tg3_flag_clear(tp, TSO_CAPABLE);
9739 			} else if (!tg3_flag(tp, TSO_CAPABLE)) {
9740 				netdev_notice(tp->dev, "TSO capability restored\n");
9741 				tg3_flag_set(tp, TSO_CAPABLE);
9745 	netif_carrier_off(tp->dev);
9747 	err = tg3_power_up(tp);
9751 	tg3_full_lock(tp, 0);
9753 	tg3_disable_ints(tp);
9754 	tg3_flag_clear(tp, INIT_COMPLETE);
9756 	tg3_full_unlock(tp);
9759 	 * Setup interrupts first so we know how
9760 	 * many NAPI resources to allocate
9764 	/* The placement of this call is tied
9765 	 * to the setup and use of Host TX descriptors.
9767 	err = tg3_alloc_consistent(tp);
9773 	tg3_napi_enable(tp);
9775 	for (i = 0; i < tp->irq_cnt; i++) {
9776 		struct tg3_napi *tnapi = &tp->napi[i];
9777 		err = tg3_request_irq(tp, i);
		/* On failure, release every IRQ acquired so far. */
9779 			for (i--; i >= 0; i--) {
9780 				tnapi = &tp->napi[i];
9781 				free_irq(tnapi->irq_vec, tnapi);
9787 	tg3_full_lock(tp, 0);
9789 	err = tg3_init_hw(tp, 1);
9791 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	/* Timer cadence: 1 Hz with tagged status (except 5717/57765),
	 * otherwise 10 Hz to cover the non-tagged races.
	 */
9794 		if (tg3_flag(tp, TAGGED_STATUS) &&
9795 		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9796 		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765)
9797 			tp->timer_offset = HZ;
9799 			tp->timer_offset = HZ / 10;
9801 		BUG_ON(tp->timer_offset > HZ);
9802 		tp->timer_counter = tp->timer_multiplier =
9803 			(HZ / tp->timer_offset);
9804 		tp->asf_counter = tp->asf_multiplier =
9805 			((HZ / tp->timer_offset) * 2);
9807 		init_timer(&tp->timer);
9808 		tp->timer.expires = jiffies + tp->timer_offset;
9809 		tp->timer.data = (unsigned long) tp;
9810 		tp->timer.function = tg3_timer;
9813 	tg3_full_unlock(tp);
9818 	if (tg3_flag(tp, USING_MSI)) {
9819 		err = tg3_test_msi(tp);
9822 			tg3_full_lock(tp, 0);
9823 			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9825 			tg3_full_unlock(tp);
	/* Pre-57765 chips need 1-shot MSI enabled at the PCIe level. */
9830 		if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
9831 			u32 val = tr32(PCIE_TRANSACTION_CFG);
9833 			tw32(PCIE_TRANSACTION_CFG,
9834 			     val | PCIE_TRANS_CFG_1SHOT_MSI);
9840 	tg3_full_lock(tp, 0);
9842 	add_timer(&tp->timer);
9843 	tg3_flag_set(tp, INIT_COMPLETE);
9844 	tg3_enable_ints(tp);
9846 	tg3_full_unlock(tp);
9848 	netif_tx_start_all_queues(dev);
9851 	 * Reset loopback feature if it was turned on while the device was down
9852 	 * make sure that it's installed properly now.
9854 	if (dev->features & NETIF_F_LOOPBACK)
9855 		tg3_set_loopback(dev, dev->features);
	/* Error unwind paths below: free IRQs, disable NAPI, free DMA
	 * memory, then power the chip back down.
	 */
9860 	for (i = tp->irq_cnt - 1; i >= 0; i--) {
9861 		struct tg3_napi *tnapi = &tp->napi[i];
9862 		free_irq(tnapi->irq_vec, tnapi);
9866 	tg3_napi_disable(tp);
9868 	tg3_free_consistent(tp);
9872 	tg3_frob_aux_power(tp, false);
9873 	pci_set_power_state(tp->pdev, PCI_D3hot);
9877 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *,
9878 struct rtnl_link_stats64 *);
9879 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
/* ndo_stop: tear down the interface.  Stops NAPI/timer/queues, halts the
 * chip, releases IRQs, snapshots final statistics into the *_prev copies
 * (so counters survive the down/up cycle), and frees DMA memory.
 */
9881 static int tg3_close(struct net_device *dev)
9884 	struct tg3 *tp = netdev_priv(dev);
9886 	tg3_napi_disable(tp);
9887 	tg3_reset_task_cancel(tp);
9889 	netif_tx_stop_all_queues(dev);
9891 	del_timer_sync(&tp->timer);
9895 	tg3_full_lock(tp, 1);
9897 	tg3_disable_ints(tp);
9899 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9901 	tg3_flag_clear(tp, INIT_COMPLETE);
9903 	tg3_full_unlock(tp);
9905 	for (i = tp->irq_cnt - 1; i >= 0; i--) {
9906 		struct tg3_napi *tnapi = &tp->napi[i];
9907 		free_irq(tnapi->irq_vec, tnapi);
	/* Accumulate the final hardware counters before they are lost. */
9912 	tg3_get_stats64(tp->dev, &tp->net_stats_prev);
9914 	memcpy(&tp->estats_prev, tg3_get_estats(tp),
9915 	       sizeof(tp->estats_prev));
9919 	tg3_free_consistent(tp);
9923 	netif_carrier_off(tp->dev);
9928 static inline u64 get_stat64(tg3_stat64_t *val)
9930 return ((u64)val->high << 32) | ((u64)val->low);
/* Return the cumulative RX CRC error count.  On 5700/5701 copper the MAC
 * counter is unreliable, so the PHY's own CRC counter (MII_TG3_TEST1 /
 * MII_TG3_RXR_COUNTERS) is read and accumulated in tp->phy_crc_errors
 * instead; all other chips use the hardware rx_fcs_errors statistic.
 */
9933 static u64 calc_crc_errors(struct tg3 *tp)
9935 	struct tg3_hw_stats *hw_stats = tp->hw_stats;
9937 	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9938 	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9939 	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		/* tp->lock serializes the PHY register access. */
9942 		spin_lock_bh(&tp->lock);
9943 		if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
			/* Enable the PHY CRC counter, then read it. */
9944 			tg3_writephy(tp, MII_TG3_TEST1,
9945 				     val | MII_TG3_TEST1_CRC_EN);
9946 			tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
9949 		spin_unlock_bh(&tp->lock);
9951 		tp->phy_crc_errors += val;
9953 		return tp->phy_crc_errors;
9956 	return get_stat64(&hw_stats->rx_fcs_errors);
/* Helper for tg3_get_estats(): new ethtool stat = previously-saved value
 * plus the current hardware counter.  Relies on estats/old_estats/hw_stats
 * locals being in scope at the expansion site.
 */
9959 #define ESTAT_ADD(member) \
9960 	estats->member = old_estats->member + \
9961 			 get_stat64(&hw_stats->member)
/* Build the full ethtool statistics block in tp->estats: each counter is the
 * value saved at the last close (estats_prev) plus the live hardware counter,
 * via the ESTAT_ADD() macro above.  Returns a pointer to tp->estats.
 */
9963 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
9965 	struct tg3_ethtool_stats *estats = &tp->estats;
9966 	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
9967 	struct tg3_hw_stats *hw_stats = tp->hw_stats;
	/* RX MAC statistics. */
9972 	ESTAT_ADD(rx_octets);
9973 	ESTAT_ADD(rx_fragments);
9974 	ESTAT_ADD(rx_ucast_packets);
9975 	ESTAT_ADD(rx_mcast_packets);
9976 	ESTAT_ADD(rx_bcast_packets);
9977 	ESTAT_ADD(rx_fcs_errors);
9978 	ESTAT_ADD(rx_align_errors);
9979 	ESTAT_ADD(rx_xon_pause_rcvd);
9980 	ESTAT_ADD(rx_xoff_pause_rcvd);
9981 	ESTAT_ADD(rx_mac_ctrl_rcvd);
9982 	ESTAT_ADD(rx_xoff_entered);
9983 	ESTAT_ADD(rx_frame_too_long_errors);
9984 	ESTAT_ADD(rx_jabbers);
9985 	ESTAT_ADD(rx_undersize_packets);
9986 	ESTAT_ADD(rx_in_length_errors);
9987 	ESTAT_ADD(rx_out_length_errors);
9988 	ESTAT_ADD(rx_64_or_less_octet_packets);
9989 	ESTAT_ADD(rx_65_to_127_octet_packets);
9990 	ESTAT_ADD(rx_128_to_255_octet_packets);
9991 	ESTAT_ADD(rx_256_to_511_octet_packets);
9992 	ESTAT_ADD(rx_512_to_1023_octet_packets);
9993 	ESTAT_ADD(rx_1024_to_1522_octet_packets);
9994 	ESTAT_ADD(rx_1523_to_2047_octet_packets);
9995 	ESTAT_ADD(rx_2048_to_4095_octet_packets);
9996 	ESTAT_ADD(rx_4096_to_8191_octet_packets);
9997 	ESTAT_ADD(rx_8192_to_9022_octet_packets);
	/* TX MAC statistics. */
9999 	ESTAT_ADD(tx_octets);
10000 	ESTAT_ADD(tx_collisions);
10001 	ESTAT_ADD(tx_xon_sent);
10002 	ESTAT_ADD(tx_xoff_sent);
10003 	ESTAT_ADD(tx_flow_control);
10004 	ESTAT_ADD(tx_mac_errors);
10005 	ESTAT_ADD(tx_single_collisions);
10006 	ESTAT_ADD(tx_mult_collisions);
10007 	ESTAT_ADD(tx_deferred);
10008 	ESTAT_ADD(tx_excessive_collisions);
10009 	ESTAT_ADD(tx_late_collisions);
10010 	ESTAT_ADD(tx_collide_2times);
10011 	ESTAT_ADD(tx_collide_3times);
10012 	ESTAT_ADD(tx_collide_4times);
10013 	ESTAT_ADD(tx_collide_5times);
10014 	ESTAT_ADD(tx_collide_6times);
10015 	ESTAT_ADD(tx_collide_7times);
10016 	ESTAT_ADD(tx_collide_8times);
10017 	ESTAT_ADD(tx_collide_9times);
10018 	ESTAT_ADD(tx_collide_10times);
10019 	ESTAT_ADD(tx_collide_11times);
10020 	ESTAT_ADD(tx_collide_12times);
10021 	ESTAT_ADD(tx_collide_13times);
10022 	ESTAT_ADD(tx_collide_14times);
10023 	ESTAT_ADD(tx_collide_15times);
10024 	ESTAT_ADD(tx_ucast_packets);
10025 	ESTAT_ADD(tx_mcast_packets);
10026 	ESTAT_ADD(tx_bcast_packets);
10027 	ESTAT_ADD(tx_carrier_sense_errors);
10028 	ESTAT_ADD(tx_discards);
10029 	ESTAT_ADD(tx_errors);
	/* DMA write / receive-list-placement statistics. */
10031 	ESTAT_ADD(dma_writeq_full);
10032 	ESTAT_ADD(dma_write_prioq_full);
10033 	ESTAT_ADD(rxbds_empty);
10034 	ESTAT_ADD(rx_discards);
10035 	ESTAT_ADD(rx_errors);
10036 	ESTAT_ADD(rx_threshold_hit);
	/* DMA read / send-data statistics. */
10038 	ESTAT_ADD(dma_readq_full);
10039 	ESTAT_ADD(dma_read_prioq_full);
10040 	ESTAT_ADD(tx_comp_queue_full);
	/* Host coalescing statistics. */
10042 	ESTAT_ADD(ring_set_send_prod_index);
10043 	ESTAT_ADD(ring_status_update);
10044 	ESTAT_ADD(nic_irqs);
10045 	ESTAT_ADD(nic_avoided_irqs);
10046 	ESTAT_ADD(nic_tx_threshold_hit);
10048 	ESTAT_ADD(mbuf_lwm_thresh_hit);
/* ndo_get_stats64: fill *stats with the standard netdev counters.  Each
 * field is the value saved at the last close (net_stats_prev) plus the live
 * hardware counters, mapped onto the rtnl_link_stats64 layout.
 */
10053 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
10054 						 struct rtnl_link_stats64 *stats)
10056 	struct tg3 *tp = netdev_priv(dev);
10057 	struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
10058 	struct tg3_hw_stats *hw_stats = tp->hw_stats;
	/* Packet totals are the sum of the unicast/multicast/broadcast
	 * hardware counters.
	 */
10063 	stats->rx_packets = old_stats->rx_packets +
10064 		get_stat64(&hw_stats->rx_ucast_packets) +
10065 		get_stat64(&hw_stats->rx_mcast_packets) +
10066 		get_stat64(&hw_stats->rx_bcast_packets);
10068 	stats->tx_packets = old_stats->tx_packets +
10069 		get_stat64(&hw_stats->tx_ucast_packets) +
10070 		get_stat64(&hw_stats->tx_mcast_packets) +
10071 		get_stat64(&hw_stats->tx_bcast_packets);
10073 	stats->rx_bytes = old_stats->rx_bytes +
10074 		get_stat64(&hw_stats->rx_octets);
10075 	stats->tx_bytes = old_stats->tx_bytes +
10076 		get_stat64(&hw_stats->tx_octets);
10078 	stats->rx_errors = old_stats->rx_errors +
10079 		get_stat64(&hw_stats->rx_errors);
10080 	stats->tx_errors = old_stats->tx_errors +
10081 		get_stat64(&hw_stats->tx_errors) +
10082 		get_stat64(&hw_stats->tx_mac_errors) +
10083 		get_stat64(&hw_stats->tx_carrier_sense_errors) +
10084 		get_stat64(&hw_stats->tx_discards);
10086 	stats->multicast = old_stats->multicast +
10087 		get_stat64(&hw_stats->rx_mcast_packets);
10088 	stats->collisions = old_stats->collisions +
10089 		get_stat64(&hw_stats->tx_collisions);
10091 	stats->rx_length_errors = old_stats->rx_length_errors +
10092 		get_stat64(&hw_stats->rx_frame_too_long_errors) +
10093 		get_stat64(&hw_stats->rx_undersize_packets);
10095 	stats->rx_over_errors = old_stats->rx_over_errors +
10096 		get_stat64(&hw_stats->rxbds_empty);
10097 	stats->rx_frame_errors = old_stats->rx_frame_errors +
10098 		get_stat64(&hw_stats->rx_align_errors);
10099 	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
10100 		get_stat64(&hw_stats->tx_discards);
10101 	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
10102 		get_stat64(&hw_stats->tx_carrier_sense_errors);
	/* CRC errors come from the PHY on 5700/5701 (see calc_crc_errors). */
10104 	stats->rx_crc_errors = old_stats->rx_crc_errors +
10105 		calc_crc_errors(tp);
10107 	stats->rx_missed_errors = old_stats->rx_missed_errors +
10108 		get_stat64(&hw_stats->rx_discards);
	/* Software drop counters maintained by the rx/tx paths. */
10110 	stats->rx_dropped = tp->rx_dropped;
10111 	stats->tx_dropped = tp->tx_dropped;
/* Bit-serial CRC-32 over buf[0..len-1]; used by __tg3_set_rx_mode() to
 * derive the multicast hash-filter bit for each MAC address.
 * NOTE(review): loop body not visible here — presumably the standard
 * Ethernet CRC-32 polynomial; confirm against the full source.
 */
10116 static inline u32 calc_crc(unsigned char *buf, int len)
10124 	for (j = 0; j < len; j++) {
10127 		for (k = 0; k < 8; k++) {
10140 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
10142 /* accept or reject all multicast frames */
10143 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
10144 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
10145 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
10146 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
/* Compute and program the MAC RX mode and the 128-bit multicast hash filter
 * from dev->flags and the device's multicast list.  Caller must hold the
 * full lock (tg3_full_lock); tg3_set_rx_mode() below is the locked wrapper.
 */
10149 static void __tg3_set_rx_mode(struct net_device *dev)
10151 	struct tg3 *tp = netdev_priv(dev);
	/* Start from the current mode with promisc/VLAN-keep cleared. */
10154 	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
10155 				  RX_MODE_KEEP_VLAN_TAG);
10157 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
10158 	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
10161 	if (!tg3_flag(tp, ENABLE_ASF))
10162 		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
10165 	if (dev->flags & IFF_PROMISC) {
10166 		/* Promiscuous mode. */
10167 		rx_mode |= RX_MODE_PROMISC;
10168 	} else if (dev->flags & IFF_ALLMULTI) {
10169 		/* Accept all multicast. */
10170 		tg3_set_multi(tp, 1);
10171 	} else if (netdev_mc_empty(dev)) {
10172 		/* Reject all multicast. */
10173 		tg3_set_multi(tp, 0);
10175 		/* Accept one or more multicast(s). */
10176 		struct netdev_hw_addr *ha;
10177 		u32 mc_filter[4] = { 0, };
		/* Hash each address's CRC into one of the 128 filter bits. */
10182 		netdev_for_each_mc_addr(ha, dev) {
10183 			crc = calc_crc(ha->addr, ETH_ALEN);
10185 			regidx = (bit & 0x60) >> 5;
10187 			mc_filter[regidx] |= (1 << bit);
10190 		tw32(MAC_HASH_REG_0, mc_filter[0]);
10191 		tw32(MAC_HASH_REG_1, mc_filter[1]);
10192 		tw32(MAC_HASH_REG_2, mc_filter[2]);
10193 		tw32(MAC_HASH_REG_3, mc_filter[3]);
	/* Only touch the hardware register if the mode actually changed. */
10196 	if (rx_mode != tp->rx_mode) {
10197 		tp->rx_mode = rx_mode;
10198 		tw32_f(MAC_RX_MODE, rx_mode);
/* ndo_set_rx_mode: locked wrapper around __tg3_set_rx_mode().  A no-op while
 * the interface is down; the mode is reprogrammed on the next open anyway.
 */
10203 static void tg3_set_rx_mode(struct net_device *dev)
10205 	struct tg3 *tp = netdev_priv(dev);
10207 	if (!netif_running(dev))
10210 	tg3_full_lock(tp, 0);
10211 	__tg3_set_rx_mode(dev);
10212 	tg3_full_unlock(tp);
/* ethtool get_regs_len: size of the register dump produced by tg3_get_regs. */
10215 static int tg3_get_regs_len(struct net_device *dev)
10217 	return TG3_REG_BLK_SIZE;
/* ethtool get_regs: dump the legacy register block into _p (zero-filled
 * first so unread registers read as 0).  Skipped while the PHY is in low
 * power, since register access would be unreliable.
 */
10220 static void tg3_get_regs(struct net_device *dev,
10221 			 struct ethtool_regs *regs, void *_p)
10223 	struct tg3 *tp = netdev_priv(dev);
10227 	memset(_p, 0, TG3_REG_BLK_SIZE);
10229 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10232 	tg3_full_lock(tp, 0);
10234 	tg3_dump_legacy_regs(tp, (u32 *)_p);
10236 	tg3_full_unlock(tp);
/* ethtool get_eeprom_len: size of the NVRAM exposed through get/set_eeprom. */
10239 static int tg3_get_eeprom_len(struct net_device *dev)
10241 	struct tg3 *tp = netdev_priv(dev);
10243 	return tp->nvram_size;
/* ethtool get_eeprom: copy eeprom->len bytes of NVRAM starting at
 * eeprom->offset into data.  NVRAM is only readable in 4-byte big-endian
 * words, so the transfer is split into an unaligned head, an aligned
 * middle, and an unaligned tail.  eeprom->len tracks bytes delivered.
 */
10246 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10248 	struct tg3 *tp = netdev_priv(dev);
10251 	u32 i, offset, len, b_offset, b_count;
10254 	if (tg3_flag(tp, NO_NVRAM))
10257 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10260 	offset = eeprom->offset;
10264 	eeprom->magic = TG3_EEPROM_MAGIC;
10267 	/* adjustments to start on required 4 byte boundary */
10268 	b_offset = offset & 3;
10269 	b_count = 4 - b_offset;
10270 	if (b_count > len) {
10271 		/* i.e. offset=1 len=2 */
10274 	ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
	/* Copy just the requested bytes out of the aligned word. */
10277 	memcpy(data, ((char *)&val) + b_offset, b_count);
10280 	eeprom->len += b_count;
10283 	/* read bytes up to the last 4 byte boundary */
10284 	pd = &data[eeprom->len];
10285 	for (i = 0; i < (len - (len & 3)); i += 4) {
10286 		ret = tg3_nvram_read_be32(tp, offset + i, &val);
10291 		memcpy(pd + i, &val, 4);
10296 	/* read last bytes not ending on 4 byte boundary */
10297 	pd = &data[eeprom->len];
10299 	b_offset = offset + len - b_count;
10300 	ret = tg3_nvram_read_be32(tp, b_offset, &val);
10303 	memcpy(pd, &val, b_count);
10304 	eeprom->len += b_count;
10309 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
/* ethtool set_eeprom: write eeprom->len bytes at eeprom->offset into NVRAM.
 * NVRAM writes must be 4-byte aligned, so for unaligned edges the bounding
 * words are read back first and merged with the user data in a temporary
 * buffer before the whole aligned span is written.
 */
10311 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10313 	struct tg3 *tp = netdev_priv(dev);
10315 	u32 offset, len, b_offset, odd_len;
10319 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
	/* Refuse writes unless the caller echoed the device magic back. */
10322 	if (tg3_flag(tp, NO_NVRAM) ||
10323 	    eeprom->magic != TG3_EEPROM_MAGIC)
10326 	offset = eeprom->offset;
10329 	if ((b_offset = (offset & 3))) {
10330 		/* adjustments to start on required 4 byte boundary */
10331 		ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
10342 		/* adjustments to end on required 4 byte boundary */
10344 		len = (len + 3) & ~3;
10345 		ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
	/* Unaligned at either edge: merge user data into a bounce buffer. */
10351 	if (b_offset || odd_len) {
10352 		buf = kmalloc(len, GFP_KERNEL);
10356 			memcpy(buf, &start, 4);
10358 			memcpy(buf+len-4, &end, 4);
10359 		memcpy(buf + b_offset, data, eeprom->len);
10362 	ret = tg3_nvram_write_block(tp, offset, len, buf);
/* ethtool .get_settings: report link capabilities and current state.
 * When phylib owns the PHY, defer entirely to phy_ethtool_gset().
 * Otherwise build the supported/advertised masks from the phy_flags
 * (10/100-only, serdes vs copper) and the saved link_config, and
 * report live speed/duplex only while the interface is running.
 */
10370 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10372 struct tg3 *tp = netdev_priv(dev);
10374 if (tg3_flag(tp, USE_PHYLIB)) {
10375 struct phy_device *phydev;
10376 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10378 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10379 return phy_ethtool_gset(phydev, cmd);
10382 cmd->supported = (SUPPORTED_Autoneg);
10384 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10385 cmd->supported |= (SUPPORTED_1000baseT_Half |
10386 SUPPORTED_1000baseT_Full);
/* Copper ports get the 10/100 modes and TP; serdes reports fibre. */
10388 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
10389 cmd->supported |= (SUPPORTED_100baseT_Half |
10390 SUPPORTED_100baseT_Full |
10391 SUPPORTED_10baseT_Half |
10392 SUPPORTED_10baseT_Full |
10394 cmd->port = PORT_TP;
10396 cmd->supported |= SUPPORTED_FIBRE;
10397 cmd->port = PORT_FIBRE;
10400 cmd->advertising = tp->link_config.advertising;
/* Translate the FLOW_CTRL_RX/TX pair into Pause/Asym_Pause bits. */
10401 if (tg3_flag(tp, PAUSE_AUTONEG)) {
10402 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
10403 if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10404 cmd->advertising |= ADVERTISED_Pause;
10406 cmd->advertising |= ADVERTISED_Pause |
10407 ADVERTISED_Asym_Pause;
10409 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10410 cmd->advertising |= ADVERTISED_Asym_Pause;
10413 if (netif_running(dev)) {
10414 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
10415 cmd->duplex = tp->link_config.active_duplex;
10417 ethtool_cmd_speed_set(cmd, SPEED_INVALID);
10418 cmd->duplex = DUPLEX_INVALID;
10420 cmd->phy_address = tp->phy_addr;
10421 cmd->transceiver = XCVR_INTERNAL;
10422 cmd->autoneg = tp->link_config.autoneg;
/* ethtool .set_settings: validate and apply new link parameters.
 * phylib-managed PHYs are handed to phy_ethtool_sset().  Otherwise the
 * request is checked against what this port can actually advertise
 * (built into 'mask'), the link_config is updated under the full lock,
 * and the PHY is reprogrammed via tg3_setup_phy() if the device is up.
 * NOTE(review): several validation return statements are elided in
 * this extract.
 */
10430 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10431 struct tg3 *tp = netdev_priv(dev);
10432 u32 speed = ethtool_cmd_speed(cmd);
10433 if (tg3_flag(tp, USE_PHYLIB)) {
10434 struct phy_device *phydev;
10435 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10437 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10438 return phy_ethtool_sset(phydev, cmd);
/* Sanity: autoneg must be a known value; forced mode needs a duplex. */
10441 if (cmd->autoneg != AUTONEG_ENABLE &&
10442 cmd->autoneg != AUTONEG_DISABLE)
10445 if (cmd->autoneg == AUTONEG_DISABLE &&
10446 cmd->duplex != DUPLEX_FULL &&
10447 cmd->duplex != DUPLEX_HALF)
10450 if (cmd->autoneg == AUTONEG_ENABLE) {
10451 u32 mask = ADVERTISED_Autoneg |
10453 ADVERTISED_Asym_Pause;
10455 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10456 mask |= ADVERTISED_1000baseT_Half |
10457 ADVERTISED_1000baseT_Full;
10459 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
10460 mask |= ADVERTISED_100baseT_Half |
10461 ADVERTISED_100baseT_Full |
10462 ADVERTISED_10baseT_Half |
10463 ADVERTISED_10baseT_Full |
10466 mask |= ADVERTISED_FIBRE;
/* Reject anything the port cannot advertise, then trim the request. */
10468 if (cmd->advertising & ~mask)
10471 mask &= (ADVERTISED_1000baseT_Half |
10472 ADVERTISED_1000baseT_Full |
10473 ADVERTISED_100baseT_Half |
10474 ADVERTISED_100baseT_Full |
10475 ADVERTISED_10baseT_Half |
10476 ADVERTISED_10baseT_Full);
10478 cmd->advertising &= mask;
/* Forced-speed limits: serdes is 1000/full only; copper 10/100. */
10480 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
10481 if (speed != SPEED_1000)
10484 if (cmd->duplex != DUPLEX_FULL)
10487 if (speed != SPEED_100 &&
10493 tg3_full_lock(tp, 0);
10495 tp->link_config.autoneg = cmd->autoneg;
10496 if (cmd->autoneg == AUTONEG_ENABLE) {
10497 tp->link_config.advertising = (cmd->advertising |
10498 ADVERTISED_Autoneg);
10499 tp->link_config.speed = SPEED_INVALID;
10500 tp->link_config.duplex = DUPLEX_INVALID;
10502 tp->link_config.advertising = 0;
10503 tp->link_config.speed = speed;
10504 tp->link_config.duplex = cmd->duplex;
/* Remember the requested config so it survives resets/power events. */
10507 tp->link_config.orig_speed = tp->link_config.speed;
10508 tp->link_config.orig_duplex = tp->link_config.duplex;
10509 tp->link_config.orig_autoneg = tp->link_config.autoneg;
10511 if (netif_running(dev))
10512 tg3_setup_phy(tp, 1);
10514 tg3_full_unlock(tp);
/* ethtool .get_drvinfo: identify driver name/version, firmware
 * version, and PCI bus address.
 * NOTE(review): unbounded strcpy() into the fixed-size ethtool_drvinfo
 * fields -- a bounded copy (strlcpy/snprintf) would be safer if any of
 * these source strings can approach the field sizes; confirm lengths.
 */
10519 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
10521 struct tg3 *tp = netdev_priv(dev);
10523 strcpy(info->driver, DRV_MODULE_NAME);
10524 strcpy(info->version, DRV_MODULE_VERSION);
10525 strcpy(info->fw_version, tp->fw_ver);
10526 strcpy(info->bus_info, pci_name(tp->pdev));
/* ethtool .get_wol: Wake-on-LAN capability/state.  Only magic-packet
 * wake is supported, and only when both the chip (WOL_CAP) and the
 * platform device wakeup path report support.
 */
10529 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10531 struct tg3 *tp = netdev_priv(dev);
10533 if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
10534 wol->supported = WAKE_MAGIC;
10536 wol->supported = 0;
10538 if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
10539 wol->wolopts = WAKE_MAGIC;
10540 memset(&wol->sopass, 0, sizeof(wol->sopass));
/* ethtool .set_wol: enable/disable magic-packet wake.  Rejects any
 * wake option other than WAKE_MAGIC, and WAKE_MAGIC itself when the
 * chip or platform cannot wake.  The device-core wakeup state is the
 * source of truth; the driver flag mirrors it under tp->lock.
 */
10543 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10545 struct tg3 *tp = netdev_priv(dev);
10546 struct device *dp = &tp->pdev->dev;
10548 if (wol->wolopts & ~WAKE_MAGIC)
10550 if ((wol->wolopts & WAKE_MAGIC) &&
10551 !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
10554 device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
10556 spin_lock_bh(&tp->lock);
10557 if (device_may_wakeup(dp))
10558 tg3_flag_set(tp, WOL_ENABLE);
10560 tg3_flag_clear(tp, WOL_ENABLE);
10561 spin_unlock_bh(&tp->lock);
/* ethtool .get_msglevel: current netif message-enable bitmask. */
10566 static u32 tg3_get_msglevel(struct net_device *dev)
10568 struct tg3 *tp = netdev_priv(dev);
10569 return tp->msg_enable;
/* ethtool .set_msglevel: set the netif message-enable bitmask. */
10572 static void tg3_set_msglevel(struct net_device *dev, u32 value)
10574 struct tg3 *tp = netdev_priv(dev);
10575 tp->msg_enable = value;
/* ethtool .nway_reset: restart autonegotiation.  Not supported on
 * plain PHY-serdes ports or while the interface is down.  With phylib,
 * phy_start_aneg() does the work; otherwise BMCR is poked directly.
 */
10578 static int tg3_nway_reset(struct net_device *dev)
10580 struct tg3 *tp = netdev_priv(dev);
10583 if (!netif_running(dev))
10586 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
10589 if (tg3_flag(tp, USE_PHYLIB)) {
10590 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10592 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
10596 spin_lock_bh(&tp->lock);
/* BMCR is read twice on purpose: the first read is discarded and the
 * second one's value is used (latched-state flush on this hardware).
 */
10598 tg3_readphy(tp, MII_BMCR, &bmcr);
10599 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
10600 ((bmcr & BMCR_ANENABLE) ||
10601 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
10602 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
10606 spin_unlock_bh(&tp->lock);
/* ethtool .get_ringparam: report RX/TX ring limits and current sizes.
 * Jumbo values are reported only when the jumbo ring is enabled.
 */
10612 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10614 struct tg3 *tp = netdev_priv(dev);
10616 ering->rx_max_pending = tp->rx_std_ring_mask;
10617 if (tg3_flag(tp, JUMBO_RING_ENABLE))
10618 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
10620 ering->rx_jumbo_max_pending = 0;
10622 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
10624 ering->rx_pending = tp->rx_pending;
10625 if (tg3_flag(tp, JUMBO_RING_ENABLE))
10626 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
10628 ering->rx_jumbo_pending = 0;
10630 ering->tx_pending = tp->napi[0].tx_pending;
/* ethtool .set_ringparam: resize the RX/TX rings.  TX must be large
 * enough to hold a maximally-fragmented skb (x3 on TSO_BUG chips).  If
 * the device is running, traffic is stopped, the new sizes recorded,
 * and the hardware halted and restarted to pick them up.
 */
10633 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10635 struct tg3 *tp = netdev_priv(dev);
10636 int i, irq_sync = 0, err = 0;
10638 if ((ering->rx_pending > tp->rx_std_ring_mask) ||
10639 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
10640 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
10641 (ering->tx_pending <= MAX_SKB_FRAGS) ||
10642 (tg3_flag(tp, TSO_BUG) &&
10643 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
10646 if (netif_running(dev)) {
10648 tg3_netif_stop(tp);
10652 tg3_full_lock(tp, irq_sync);
10654 tp->rx_pending = ering->rx_pending;
/* Chips with the MAX_RXPEND_64 limitation cap the std RX ring at 63. */
10656 if (tg3_flag(tp, MAX_RXPEND_64) &&
10657 tp->rx_pending > 63)
10658 tp->rx_pending = 63;
10659 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
10661 for (i = 0; i < tp->irq_max; i++)
10662 tp->napi[i].tx_pending = ering->tx_pending;
10664 if (netif_running(dev)) {
10665 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10666 err = tg3_restart_hw(tp, 1);
10668 tg3_netif_start(tp);
10671 tg3_full_unlock(tp);
10673 if (irq_sync && !err)
/* ethtool .get_pauseparam: report flow-control autoneg setting and
 * the currently active RX/TX pause state.
 */
10679 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10681 struct tg3 *tp = netdev_priv(dev);
10683 epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
10685 if (tp->link_config.active_flowctrl & FLOW_CTRL_RX)
10686 epause->rx_pause = 1;
10688 epause->rx_pause = 0;
10690 if (tp->link_config.active_flowctrl & FLOW_CTRL_TX)
10691 epause->tx_pause = 1;
10693 epause->tx_pause = 0;
/* ethtool .set_pauseparam: apply new flow-control settings.
 * Two paths:
 *  - phylib: translate rx/tx pause into Pause/Asym_Pause advertisement
 *    bits and, when they change under active autoneg, renegotiate so
 *    the link partner learns the new settings (tg3_adjust_link()
 *    finishes the job).
 *  - native: update the PAUSE_AUTONEG flag and flowctrl bits under the
 *    full lock, then halt/restart the hardware if the device is up.
 */
10696 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10698 struct tg3 *tp = netdev_priv(dev);
10701 if (tg3_flag(tp, USE_PHYLIB)) {
10703 struct phy_device *phydev;
10705 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
/* Asymmetric pause requires PHY support; otherwise rx must equal tx. */
10707 if (!(phydev->supported & SUPPORTED_Pause) ||
10708 (!(phydev->supported & SUPPORTED_Asym_Pause) &&
10709 (epause->rx_pause != epause->tx_pause)))
10712 tp->link_config.flowctrl = 0;
10713 if (epause->rx_pause) {
10714 tp->link_config.flowctrl |= FLOW_CTRL_RX;
10716 if (epause->tx_pause) {
10717 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10718 newadv = ADVERTISED_Pause;
10720 newadv = ADVERTISED_Pause |
10721 ADVERTISED_Asym_Pause;
10722 } else if (epause->tx_pause) {
10723 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10724 newadv = ADVERTISED_Asym_Pause;
10728 if (epause->autoneg)
10729 tg3_flag_set(tp, PAUSE_AUTONEG);
10731 tg3_flag_clear(tp, PAUSE_AUTONEG);
10733 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
10734 u32 oldadv = phydev->advertising &
10735 (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
10736 if (oldadv != newadv) {
10737 phydev->advertising &=
10738 ~(ADVERTISED_Pause |
10739 ADVERTISED_Asym_Pause);
10740 phydev->advertising |= newadv;
10741 if (phydev->autoneg) {
10743 * Always renegotiate the link to
10744 * inform our link partner of our
10745 * flow control settings, even if the
10746 * flow control is forced. Let
10747 * tg3_adjust_link() do the final
10748 * flow control setup.
10750 return phy_start_aneg(phydev);
10754 if (!epause->autoneg)
10755 tg3_setup_flow_control(tp, 0, 0);
10757 tp->link_config.orig_advertising &=
10758 ~(ADVERTISED_Pause |
10759 ADVERTISED_Asym_Pause);
10760 tp->link_config.orig_advertising |= newadv;
/* Native (non-phylib) path below. */
10765 if (netif_running(dev)) {
10766 tg3_netif_stop(tp);
10770 tg3_full_lock(tp, irq_sync);
10772 if (epause->autoneg)
10773 tg3_flag_set(tp, PAUSE_AUTONEG);
10775 tg3_flag_clear(tp, PAUSE_AUTONEG);
10776 if (epause->rx_pause)
10777 tp->link_config.flowctrl |= FLOW_CTRL_RX;
10779 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
10780 if (epause->tx_pause)
10781 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10783 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
10785 if (netif_running(dev)) {
10786 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10787 err = tg3_restart_hw(tp, 1);
10789 tg3_netif_start(tp);
10792 tg3_full_unlock(tp);
/* ethtool .get_sset_count: number of strings for the requested set
 * (self-tests or statistics); -EOPNOTSUPP for unknown sets.
 * NOTE(review): the case labels are elided in this extract.
 */
10798 static int tg3_get_sset_count(struct net_device *dev, int sset)
10802 return TG3_NUM_TEST;
10804 return TG3_NUM_STATS;
10806 return -EOPNOTSUPP;
10810 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10812 switch (stringset) {
10814 memcpy(buf, ðtool_stats_keys, sizeof(ethtool_stats_keys));
10817 memcpy(buf, ðtool_test_keys, sizeof(ethtool_test_keys));
10820 WARN_ON(1); /* we need a WARN() */
/* ethtool .set_phys_id: blink the port LEDs so an operator can locate
 * the NIC.  ACTIVE returns 1 (toggle once per second); ON/OFF force
 * the LED override bits; INACTIVE restores the saved led_ctrl value.
 */
10825 static int tg3_set_phys_id(struct net_device *dev,
10826 enum ethtool_phys_id_state state)
10828 struct tg3 *tp = netdev_priv(dev);
10830 if (!netif_running(tp->dev))
10834 case ETHTOOL_ID_ACTIVE:
10835 return 1; /* cycle on/off once per second */
10837 case ETHTOOL_ID_ON:
10838 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10839 LED_CTRL_1000MBPS_ON |
10840 LED_CTRL_100MBPS_ON |
10841 LED_CTRL_10MBPS_ON |
10842 LED_CTRL_TRAFFIC_OVERRIDE |
10843 LED_CTRL_TRAFFIC_BLINK |
10844 LED_CTRL_TRAFFIC_LED);
10847 case ETHTOOL_ID_OFF:
10848 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10849 LED_CTRL_TRAFFIC_OVERRIDE);
10852 case ETHTOOL_ID_INACTIVE:
10853 tw32(MAC_LED_CTRL, tp->led_ctrl);
/* ethtool .get_ethtool_stats: copy the driver's accumulated estats
 * block (refreshed by tg3_get_estats()) into the user buffer.
 */
10860 static void tg3_get_ethtool_stats(struct net_device *dev,
10861 struct ethtool_stats *estats, u64 *tmp_stats)
10863 struct tg3 *tp = netdev_priv(dev);
10864 memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
/* Read the Vital Product Data block into a kmalloc'd buffer (caller
 * frees) and return its length in *vpdlen.  If the NVRAM directory
 * points at an extended-VPD entry, use that location/length; otherwise
 * fall back to the default VPD offset.  Non-EEPROM parts are read via
 * the PCI VPD capability instead.  Returns NULL on failure.
 */
10867 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
10871 u32 offset = 0, len = 0;
10874 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
10877 if (magic == TG3_EEPROM_MAGIC) {
/* Scan the NVRAM directory for an extended-VPD entry. */
10878 for (offset = TG3_NVM_DIR_START;
10879 offset < TG3_NVM_DIR_END;
10880 offset += TG3_NVM_DIRENT_SIZE) {
10881 if (tg3_nvram_read(tp, offset, &val))
10884 if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
10885 TG3_NVM_DIRTYPE_EXTVPD)
10889 if (offset != TG3_NVM_DIR_END) {
10890 len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
10891 if (tg3_nvram_read(tp, offset + 4, &offset))
10894 offset = tg3_nvram_logical_addr(tp, offset);
10898 if (!offset || !len) {
10899 offset = TG3_NVM_VPD_OFF;
10900 len = TG3_NVM_VPD_LEN;
10903 buf = kmalloc(len, GFP_KERNEL);
10907 if (magic == TG3_EEPROM_MAGIC) {
10908 for (i = 0; i < len; i += 4) {
10909 /* The data is in little-endian format in NVRAM.
10910 * Use the big-endian read routines to preserve
10911 * the byte order as it exists in NVRAM.
10913 if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
10919 unsigned int pos = 0;
/* No usable NVRAM copy: pull VPD through PCI config space instead. */
10921 ptr = (u8 *)&buf[0];
10922 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
10923 cnt = pci_read_vpd(tp->pdev, pos,
10925 if (cnt == -ETIMEDOUT || cnt == -EINTR)
/* Sizes (bytes) of the NVRAM regions covered by tg3_test_nvram() for
 * each supported image format: the standard EEPROM image, the
 * selfboot format-1 revisions, and the hardware selfboot image.
 */
10943 #define NVRAM_TEST_SIZE 0x100
10944 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
10945 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
10946 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
10947 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE 0x20
10948 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE 0x24
10949 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE 0x50
10950 #define NVRAM_SELFBOOT_HW_SIZE 0x20
10951 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
/* Self-test: verify NVRAM content integrity.  The image format is
 * identified from the magic word, the relevant region is read into a
 * temporary buffer, and the appropriate check is applied: a simple
 * byte checksum for selfboot format-1 images, per-byte parity for the
 * hardware selfboot format, or CRC checks over the bootstrap and
 * manufacturing blocks (plus the VPD checksum keyword) for standard
 * EEPROM images.  Returns 0 on success.
 */
10953 static int tg3_test_nvram(struct tg3 *tp)
10955 u32 csum, magic, len;
10957 int i, j, k, err = 0, size;
10959 if (tg3_flag(tp, NO_NVRAM))
10962 if (tg3_nvram_read(tp, 0, &magic) != 0)
/* Pick the region size to verify based on the image format/revision. */
10965 if (magic == TG3_EEPROM_MAGIC)
10966 size = NVRAM_TEST_SIZE;
10967 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
10968 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
10969 TG3_EEPROM_SB_FORMAT_1) {
10970 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
10971 case TG3_EEPROM_SB_REVISION_0:
10972 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
10974 case TG3_EEPROM_SB_REVISION_2:
10975 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
10977 case TG3_EEPROM_SB_REVISION_3:
10978 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
10980 case TG3_EEPROM_SB_REVISION_4:
10981 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
10983 case TG3_EEPROM_SB_REVISION_5:
10984 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
10986 case TG3_EEPROM_SB_REVISION_6:
10987 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
10994 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
10995 size = NVRAM_SELFBOOT_HW_SIZE;
10999 buf = kmalloc(size, GFP_KERNEL);
11004 for (i = 0, j = 0; i < size; i += 4, j++) {
11005 err = tg3_nvram_read_be32(tp, i, &buf[j]);
11012 /* Selfboot format */
11013 magic = be32_to_cpu(buf[0]);
11014 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
11015 TG3_EEPROM_MAGIC_FW) {
11016 u8 *buf8 = (u8 *) buf, csum8 = 0;
11018 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
11019 TG3_EEPROM_SB_REVISION_2) {
11020 /* For rev 2, the csum doesn't include the MBA. */
11021 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
11023 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
11026 for (i = 0; i < size; i++)
11039 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
11040 TG3_EEPROM_MAGIC_HW) {
11041 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
11042 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
11043 u8 *buf8 = (u8 *) buf;
11045 /* Separate the parity bits and the data bytes. */
11046 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
11047 if ((i == 0) || (i == 8)) {
11051 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
11052 parity[k++] = buf8[i] & msk;
11054 } else if (i == 16) {
11058 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
11059 parity[k++] = buf8[i] & msk;
11062 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
11063 parity[k++] = buf8[i] & msk;
11066 data[j++] = buf8[i];
/* Verify each data byte's popcount parity against the stored bit. */
11070 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
11071 u8 hw8 = hweight8(data[i]);
11073 if ((hw8 & 0x1) && parity[i])
11075 else if (!(hw8 & 0x1) && !parity[i])
11084 /* Bootstrap checksum at offset 0x10 */
11085 csum = calc_crc((unsigned char *) buf, 0x10);
11086 if (csum != le32_to_cpu(buf[0x10/4]))
11089 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
11090 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
11091 if (csum != le32_to_cpu(buf[0xfc/4]))
/* Finally validate the VPD RO section's CHKSUM keyword. */
11096 buf = tg3_vpd_readblock(tp, &len);
11100 i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
11102 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
11106 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
11109 i += PCI_VPD_LRDT_TAG_SIZE;
11110 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
11111 PCI_VPD_RO_KEYWORD_CHKSUM);
11115 j += PCI_VPD_INFO_FLD_HDR_SIZE;
11117 for (i = 0; i <= j; i++)
11118 csum8 += ((u8 *)buf)[i];
/* Seconds tg3_test_link() waits for carrier: serdes links come up
 * faster than copper autonegotiation.
 */
11132 #define TG3_SERDES_TIMEOUT_SEC 2
11133 #define TG3_COPPER_TIMEOUT_SEC 6
/* Self-test: poll once per second (interruptibly) for link-up, for a
 * PHY-type-dependent timeout.  Requires the interface to be running.
 */
11135 static int tg3_test_link(struct tg3 *tp)
11139 if (!netif_running(tp->dev))
11142 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
11143 max = TG3_SERDES_TIMEOUT_SEC;
11145 max = TG3_COPPER_TIMEOUT_SEC;
11147 for (i = 0; i < max; i++) {
11148 if (netif_carrier_ok(tp->dev))
11151 if (msleep_interruptible(1000))
/* Self-test: exercise the commonly used MAC/DMA registers.  For each
 * table entry (filtered by ASIC generation via the TG3_FL_* flags) the
 * original value is saved, zeros then all-ones are written through the
 * writable mask, and read-only bits are checked to be unchanged while
 * read/write bits must take the written value.  The original value is
 * restored in every case, including on failure.
 */
11158 /* Only test the commonly used registers */
11159 static int tg3_test_registers(struct tg3 *tp)
11161 int i, is_5705, is_5750;
11162 u32 offset, read_mask, write_mask, val, save_val, read_val;
/* Per-entry applicability flags: which ASIC families an entry covers. */
11166 #define TG3_FL_5705 0x1
11167 #define TG3_FL_NOT_5705 0x2
11168 #define TG3_FL_NOT_5788 0x4
11169 #define TG3_FL_NOT_5750 0x8
11173 /* MAC Control Registers */
11174 { MAC_MODE, TG3_FL_NOT_5705,
11175 0x00000000, 0x00ef6f8c },
11176 { MAC_MODE, TG3_FL_5705,
11177 0x00000000, 0x01ef6b8c },
11178 { MAC_STATUS, TG3_FL_NOT_5705,
11179 0x03800107, 0x00000000 },
11180 { MAC_STATUS, TG3_FL_5705,
11181 0x03800100, 0x00000000 },
11182 { MAC_ADDR_0_HIGH, 0x0000,
11183 0x00000000, 0x0000ffff },
11184 { MAC_ADDR_0_LOW, 0x0000,
11185 0x00000000, 0xffffffff },
11186 { MAC_RX_MTU_SIZE, 0x0000,
11187 0x00000000, 0x0000ffff },
11188 { MAC_TX_MODE, 0x0000,
11189 0x00000000, 0x00000070 },
11190 { MAC_TX_LENGTHS, 0x0000,
11191 0x00000000, 0x00003fff },
11192 { MAC_RX_MODE, TG3_FL_NOT_5705,
11193 0x00000000, 0x000007fc },
11194 { MAC_RX_MODE, TG3_FL_5705,
11195 0x00000000, 0x000007dc },
11196 { MAC_HASH_REG_0, 0x0000,
11197 0x00000000, 0xffffffff },
11198 { MAC_HASH_REG_1, 0x0000,
11199 0x00000000, 0xffffffff },
11200 { MAC_HASH_REG_2, 0x0000,
11201 0x00000000, 0xffffffff },
11202 { MAC_HASH_REG_3, 0x0000,
11203 0x00000000, 0xffffffff },
11205 /* Receive Data and Receive BD Initiator Control Registers. */
11206 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
11207 0x00000000, 0xffffffff },
11208 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
11209 0x00000000, 0xffffffff },
11210 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
11211 0x00000000, 0x00000003 },
11212 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
11213 0x00000000, 0xffffffff },
11214 { RCVDBDI_STD_BD+0, 0x0000,
11215 0x00000000, 0xffffffff },
11216 { RCVDBDI_STD_BD+4, 0x0000,
11217 0x00000000, 0xffffffff },
11218 { RCVDBDI_STD_BD+8, 0x0000,
11219 0x00000000, 0xffff0002 },
11220 { RCVDBDI_STD_BD+0xc, 0x0000,
11221 0x00000000, 0xffffffff },
11223 /* Receive BD Initiator Control Registers. */
11224 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
11225 0x00000000, 0xffffffff },
11226 { RCVBDI_STD_THRESH, TG3_FL_5705,
11227 0x00000000, 0x000003ff },
11228 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
11229 0x00000000, 0xffffffff },
11231 /* Host Coalescing Control Registers. */
11232 { HOSTCC_MODE, TG3_FL_NOT_5705,
11233 0x00000000, 0x00000004 },
11234 { HOSTCC_MODE, TG3_FL_5705,
11235 0x00000000, 0x000000f6 },
11236 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
11237 0x00000000, 0xffffffff },
11238 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
11239 0x00000000, 0x000003ff },
11240 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
11241 0x00000000, 0xffffffff },
11242 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
11243 0x00000000, 0x000003ff },
11244 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
11245 0x00000000, 0xffffffff },
11246 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
11247 0x00000000, 0x000000ff },
11248 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
11249 0x00000000, 0xffffffff },
11250 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
11251 0x00000000, 0x000000ff },
11252 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
11253 0x00000000, 0xffffffff },
11254 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
11255 0x00000000, 0xffffffff },
11256 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
11257 0x00000000, 0xffffffff },
11258 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
11259 0x00000000, 0x000000ff },
11260 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
11261 0x00000000, 0xffffffff },
11262 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
11263 0x00000000, 0x000000ff },
11264 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
11265 0x00000000, 0xffffffff },
11266 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
11267 0x00000000, 0xffffffff },
11268 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
11269 0x00000000, 0xffffffff },
11270 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
11271 0x00000000, 0xffffffff },
11272 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
11273 0x00000000, 0xffffffff },
11274 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
11275 0xffffffff, 0x00000000 },
11276 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
11277 0xffffffff, 0x00000000 },
11279 /* Buffer Manager Control Registers. */
11280 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
11281 0x00000000, 0x007fff80 },
11282 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
11283 0x00000000, 0x007fffff },
11284 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
11285 0x00000000, 0x0000003f },
11286 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
11287 0x00000000, 0x000001ff },
11288 { BUFMGR_MB_HIGH_WATER, 0x0000,
11289 0x00000000, 0x000001ff },
11290 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
11291 0xffffffff, 0x00000000 },
11292 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
11293 0xffffffff, 0x00000000 },
11295 /* Mailbox Registers */
11296 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
11297 0x00000000, 0x000001ff },
11298 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
11299 0x00000000, 0x000001ff },
11300 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
11301 0x00000000, 0x000007ff },
11302 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
11303 0x00000000, 0x000001ff },
/* Table terminator: offset 0xffff ends the scan loop below. */
11305 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
11308 is_5705 = is_5750 = 0;
11309 if (tg3_flag(tp, 5705_PLUS)) {
11311 if (tg3_flag(tp, 5750_PLUS))
11315 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
11316 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
11319 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
11322 if (tg3_flag(tp, IS_5788) &&
11323 (reg_tbl[i].flags & TG3_FL_NOT_5788))
11326 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
11329 offset = (u32) reg_tbl[i].offset;
11330 read_mask = reg_tbl[i].read_mask;
11331 write_mask = reg_tbl[i].write_mask;
11333 /* Save the original register content */
11334 save_val = tr32(offset);
11336 /* Determine the read-only value. */
11337 read_val = save_val & read_mask;
11339 /* Write zero to the register, then make sure the read-only bits
11340 * are not changed and the read/write bits are all zeros.
11344 val = tr32(offset);
11346 /* Test the read-only and read/write bits. */
11347 if (((val & read_mask) != read_val) || (val & write_mask))
11350 /* Write ones to all the bits defined by RdMask and WrMask, then
11351 * make sure the read-only bits are not changed and the
11352 * read/write bits are all ones.
11354 tw32(offset, read_mask | write_mask);
11356 val = tr32(offset);
11358 /* Test the read-only bits. */
11359 if ((val & read_mask) != read_val)
11362 /* Test the read/write bits. */
11363 if ((val & write_mask) != write_mask)
11366 tw32(offset, save_val);
/* Failure path: log the offending offset and restore the register. */
11372 if (netif_msg_hw(tp))
11373 netdev_err(tp->dev,
11374 "Register test failed at offset %x\n", offset);
11375 tw32(offset, save_val);
/* Write each test pattern to every word of the [offset, offset+len)
 * internal-memory window and read it back; fail on the first mismatch.
 */
11379 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
11381 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
11385 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
11386 for (j = 0; j < len; j += 4) {
11389 tg3_write_mem(tp, offset + j, test_pattern[i]);
11390 tg3_read_mem(tp, offset + j, &val);
11391 if (val != test_pattern[i])
/* Self-test: run tg3_do_mem_test() over every internal-memory region
 * listed for this ASIC generation.  Each table is a list of
 * {offset, len} pairs terminated by offset 0xffffffff.
 */
11398 static int tg3_test_memory(struct tg3 *tp)
11400 static struct mem_entry {
11403 } mem_tbl_570x[] = {
11404 { 0x00000000, 0x00b50},
11405 { 0x00002000, 0x1c000},
11406 { 0xffffffff, 0x00000}
11407 }, mem_tbl_5705[] = {
11408 { 0x00000100, 0x0000c},
11409 { 0x00000200, 0x00008},
11410 { 0x00004000, 0x00800},
11411 { 0x00006000, 0x01000},
11412 { 0x00008000, 0x02000},
11413 { 0x00010000, 0x0e000},
11414 { 0xffffffff, 0x00000}
11415 }, mem_tbl_5755[] = {
11416 { 0x00000200, 0x00008},
11417 { 0x00004000, 0x00800},
11418 { 0x00006000, 0x00800},
11419 { 0x00008000, 0x02000},
11420 { 0x00010000, 0x0c000},
11421 { 0xffffffff, 0x00000}
11422 }, mem_tbl_5906[] = {
11423 { 0x00000200, 0x00008},
11424 { 0x00004000, 0x00400},
11425 { 0x00006000, 0x00400},
11426 { 0x00008000, 0x01000},
11427 { 0x00010000, 0x01000},
11428 { 0xffffffff, 0x00000}
11429 }, mem_tbl_5717[] = {
11430 { 0x00000200, 0x00008},
11431 { 0x00010000, 0x0a000},
11432 { 0x00020000, 0x13c00},
11433 { 0xffffffff, 0x00000}
11434 }, mem_tbl_57765[] = {
11435 { 0x00000200, 0x00008},
11436 { 0x00004000, 0x00800},
11437 { 0x00006000, 0x09800},
11438 { 0x00010000, 0x0a000},
11439 { 0xffffffff, 0x00000}
11441 struct mem_entry *mem_tbl;
/* Select the table for this chip, newest families checked first. */
11445 if (tg3_flag(tp, 5717_PLUS))
11446 mem_tbl = mem_tbl_5717;
11447 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
11448 mem_tbl = mem_tbl_57765;
11449 else if (tg3_flag(tp, 5755_PLUS))
11450 mem_tbl = mem_tbl_5755;
11451 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11452 mem_tbl = mem_tbl_5906;
11453 else if (tg3_flag(tp, 5705_PLUS))
11454 mem_tbl = mem_tbl_5705;
11456 mem_tbl = mem_tbl_570x;
11458 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
11459 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
/* Canned IPv4+TCP header template used by the TSO loopback test:
 * the constants give the MSS and the header-length components, and
 * tg3_tso_header is the pre-built byte image copied into the test
 * packet (addresses/ports/checksums patched at run time by
 * tg3_run_loopback()).
 */
11467 #define TG3_TSO_MSS 500
11469 #define TG3_TSO_IP_HDR_LEN 20
11470 #define TG3_TSO_TCP_HDR_LEN 20
11471 #define TG3_TSO_TCP_OPT_LEN 12
11473 static const u8 tg3_tso_header[] = {
11475 0x45, 0x00, 0x00, 0x00,
11476 0x00, 0x00, 0x40, 0x00,
11477 0x40, 0x06, 0x00, 0x00,
11478 0x0a, 0x00, 0x00, 0x01,
11479 0x0a, 0x00, 0x00, 0x02,
11480 0x0d, 0x00, 0xe0, 0x00,
11481 0x00, 0x00, 0x01, 0x00,
11482 0x00, 0x00, 0x02, 0x00,
11483 0x80, 0x10, 0x10, 0x00,
11484 0x14, 0x09, 0x00, 0x00,
11485 0x01, 0x01, 0x08, 0x0a,
11486 0x11, 0x11, 0x11, 0x11,
11487 0x11, 0x11, 0x11, 0x11,
/* Core of the loopback self-test: build one test frame of 'pktsz'
 * bytes (a TSO super-frame when tso_loopback), queue it on the TX
 * ring, force a coalescence interrupt, poll for the TX-complete and
 * RX-produce indices to advance, then verify each looped-back frame's
 * descriptor flags, length, and payload.  Returns 0 on success.
 * NOTE(review): error-return lines are elided in this extract.
 */
11490 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
11492 u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
11493 u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
11495 struct sk_buff *skb, *rx_skb;
11498 int num_pkts, tx_len, rx_len, i, err;
11499 struct tg3_rx_buffer_desc *desc;
11500 struct tg3_napi *tnapi, *rnapi;
11501 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
/* With RSS/TSS enabled, queue 0 is not used for data; use queue 1. */
11503 tnapi = &tp->napi[0];
11504 rnapi = &tp->napi[0];
11505 if (tp->irq_cnt > 1) {
11506 if (tg3_flag(tp, ENABLE_RSS))
11507 rnapi = &tp->napi[1];
11508 if (tg3_flag(tp, ENABLE_TSS))
11509 tnapi = &tp->napi[1];
11511 coal_now = tnapi->coal_now | rnapi->coal_now;
11516 skb = netdev_alloc_skb(tp->dev, tx_len);
11520 tx_data = skb_put(skb, tx_len);
11521 memcpy(tx_data, tp->dev->dev_addr, 6);
11522 memset(tx_data + 6, 0x0, 8);
11524 tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
11526 if (tso_loopback) {
11527 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
11529 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
11530 TG3_TSO_TCP_OPT_LEN;
11532 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
11533 sizeof(tg3_tso_header));
11536 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
11537 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
11539 /* Set the total length field in the IP header */
11540 iph->tot_len = htons((u16)(mss + hdr_len));
11542 base_flags = (TXD_FLAG_CPU_PRE_DMA |
11543 TXD_FLAG_CPU_POST_DMA);
/* HW-TSO chips encode the header length into mss/base_flags in a
 * generation-specific way; pre-HW-TSO parts get the option length.
 */
11545 if (tg3_flag(tp, HW_TSO_1) ||
11546 tg3_flag(tp, HW_TSO_2) ||
11547 tg3_flag(tp, HW_TSO_3)) {
11549 val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
11550 th = (struct tcphdr *)&tx_data[val];
11553 base_flags |= TXD_FLAG_TCPUDP_CSUM;
11555 if (tg3_flag(tp, HW_TSO_3)) {
11556 mss |= (hdr_len & 0xc) << 12;
11557 if (hdr_len & 0x10)
11558 base_flags |= 0x00000010;
11559 base_flags |= (hdr_len & 0x3e0) << 5;
11560 } else if (tg3_flag(tp, HW_TSO_2))
11561 mss |= hdr_len << 9;
11562 else if (tg3_flag(tp, HW_TSO_1) ||
11563 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
11564 mss |= (TG3_TSO_TCP_OPT_LEN << 9);
11566 base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
11569 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
11572 data_off = ETH_HLEN;
/* Fill the payload with a predictable byte ramp for verification. */
11575 for (i = data_off; i < tx_len; i++)
11576 tx_data[i] = (u8) (i & 0xff);
11578 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
11579 if (pci_dma_mapping_error(tp->pdev, map)) {
11580 dev_kfree_skb(skb);
11584 val = tnapi->tx_prod;
11585 tnapi->tx_buffers[val].skb = skb;
11586 dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
11588 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11593 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
11595 budget = tg3_tx_avail(tnapi);
11596 if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
11597 base_flags | TXD_FLAG_END, mss, 0)) {
11598 tnapi->tx_buffers[val].skb = NULL;
11599 dev_kfree_skb(skb);
11605 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
11606 tr32_mailbox(tnapi->prodmbox);
11610 /* 350 usec to allow enough time on some 10/100 Mbps devices. */
11611 for (i = 0; i < 35; i++) {
11612 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11617 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
11618 rx_idx = rnapi->hw_status->idx[0].rx_producer;
11619 if ((tx_idx == tnapi->tx_prod) &&
11620 (rx_idx == (rx_start_idx + num_pkts)))
11624 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
11625 dev_kfree_skb(skb);
11627 if (tx_idx != tnapi->tx_prod)
11630 if (rx_idx != rx_start_idx + num_pkts)
/* Validate every frame the chip looped back onto the RX ring. */
11634 while (rx_idx != rx_start_idx) {
11635 desc = &rnapi->rx_rcb[rx_start_idx++];
11636 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
11637 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
11639 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
11640 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
11643 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
11646 if (!tso_loopback) {
11647 if (rx_len != tx_len)
11650 if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
11651 if (opaque_key != RXD_OPAQUE_RING_STD)
11654 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
11657 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
11658 (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
11659 >> RXD_TCPCSUM_SHIFT != 0xffff) {
11663 if (opaque_key == RXD_OPAQUE_RING_STD) {
11664 rx_skb = tpr->rx_std_buffers[desc_idx].skb;
11665 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
11667 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
11668 rx_skb = tpr->rx_jmb_buffers[desc_idx].skb;
11669 map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
11674 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
11675 PCI_DMA_FROMDEVICE);
/* Check the byte-ramp payload made it through unmodified. */
11677 for (i = data_off; i < rx_len; i++, val++) {
11678 if (*(rx_skb->data + i) != (u8) (val & 0xff))
11685 /* tg3_free_rings will unmap and free the rx_skb */
/* Per-packet-size failure bits reported by tg3_test_loopback();
 * TG3_LOOPBACK_FAILED is the all-failed mask.
 */
11690 #define TG3_STD_LOOPBACK_FAILED 1
11691 #define TG3_JMB_LOOPBACK_FAILED 2
11692 #define TG3_TSO_LOOPBACK_FAILED 4
11693 #define TG3_LOOPBACK_FAILED \
11694 (TG3_STD_LOOPBACK_FAILED | \
11695 TG3_JMB_LOOPBACK_FAILED | \
11696 TG3_TSO_LOOPBACK_FAILED)
/* Run the loopback self-tests.  Failure bits (TG3_*_LOOPBACK_FAILED)
 * accumulate into data[0] for MAC loopback, data[1] for internal PHY
 * loopback and data[2] for external PHY loopback (only exercised when
 * do_extlpbk is set).  EEE capability is masked off for the duration of
 * the test and restored before returning.  Returns 0 on success or
 * -EIO if any loopback variant recorded a failure.
 */
11698 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
11703 eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
11704 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
/* Interface down: every loopback mode is reported failed outright. */
11706 if (!netif_running(tp->dev)) {
11707 data[0] = TG3_LOOPBACK_FAILED;
11708 data[1] = TG3_LOOPBACK_FAILED;
11710 data[2] = TG3_LOOPBACK_FAILED;
/* A failed hardware reset likewise fails all modes. */
11714 err = tg3_reset_hw(tp, 1);
11716 data[0] = TG3_LOOPBACK_FAILED;
11717 data[1] = TG3_LOOPBACK_FAILED;
11719 data[2] = TG3_LOOPBACK_FAILED;
11723 if (tg3_flag(tp, ENABLE_RSS)) {
11726 /* Reroute all rx packets to the 1st queue */
11727 for (i = MAC_RSS_INDIR_TBL_0;
11728 i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
11732 /* HW errata - mac loopback fails in some cases on 5780.
11733 * Normal traffic and PHY loopback are not affected by
11734 * errata. Also, the MAC loopback test is deprecated for
11735 * all newer ASIC revisions.
11737 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
11738 !tg3_flag(tp, CPMU_PRESENT)) {
11739 tg3_mac_loopback(tp, true);
11741 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
11742 data[0] |= TG3_STD_LOOPBACK_FAILED;
/* Jumbo-frame pass only when the jumbo ring is in use. */
11744 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11745 tg3_run_loopback(tp, 9000 + ETH_HLEN, false))
11746 data[0] |= TG3_JMB_LOOPBACK_FAILED;
11748 tg3_mac_loopback(tp, false);
/* Internal PHY loopback — skipped for SERDES PHYs and phylib-managed
 * devices.
 */
11751 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11752 !tg3_flag(tp, USE_PHYLIB)) {
11755 tg3_phy_lpbk_set(tp, 0, false);
11757 /* Wait for link */
11758 for (i = 0; i < 100; i++) {
11759 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
11764 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
11765 data[1] |= TG3_STD_LOOPBACK_FAILED;
11766 if (tg3_flag(tp, TSO_CAPABLE) &&
11767 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
11768 data[1] |= TG3_TSO_LOOPBACK_FAILED;
11769 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11770 tg3_run_loopback(tp, 9000 + ETH_HLEN, false))
11771 data[1] |= TG3_JMB_LOOPBACK_FAILED;
/* External PHY loopback (do_extlpbk path). */
11774 tg3_phy_lpbk_set(tp, 0, true);
11776 /* All link indications report up, but the hardware
11777 * isn't really ready for about 20 msec. Double it
11782 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
11783 data[2] |= TG3_STD_LOOPBACK_FAILED;
11784 if (tg3_flag(tp, TSO_CAPABLE) &&
11785 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
11786 data[2] |= TG3_TSO_LOOPBACK_FAILED;
11787 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11788 tg3_run_loopback(tp, 9000 + ETH_HLEN, false))
11789 data[2] |= TG3_JMB_LOOPBACK_FAILED;
11792 /* Re-enable gphy autopowerdown. */
11793 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
11794 tg3_phy_toggle_apd(tp, true);
11797 err = (data[0] | data[1] | data[2]) ? -EIO : 0;
/* Restore the EEE capability bit saved on entry. */
11800 tp->phy_flags |= eee_cap;
/* ethtool .self_test callback.  Runs the NVRAM, link, register, memory,
 * loopback and interrupt tests, setting ETH_TEST_FL_FAILED in
 * etest->flags and per-test entries in data[] on failure.  The offline
 * portion (ETH_TEST_FL_OFFLINE) halts the chip under the full lock and
 * restarts it afterwards.
 */
11805 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
11808 struct tg3 *tp = netdev_priv(dev);
11809 bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
/* If the device is in low power and cannot be powered up, mark every
 * test as failed and bail.
 */
11811 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
11812 tg3_power_up(tp)) {
11813 etest->flags |= ETH_TEST_FL_FAILED;
11814 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
11818 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
11820 if (tg3_test_nvram(tp) != 0) {
11821 etest->flags |= ETH_TEST_FL_FAILED;
/* Link test is skipped when external loopback was requested. */
11824 if (!doextlpbk && tg3_test_link(tp)) {
11825 etest->flags |= ETH_TEST_FL_FAILED;
11828 if (etest->flags & ETH_TEST_FL_OFFLINE) {
11829 int err, err2 = 0, irq_sync = 0;
11831 if (netif_running(dev)) {
11833 tg3_netif_stop(tp);
11837 tg3_full_lock(tp, irq_sync);
/* Quiesce the chip: suspend-style halt, then stop the on-chip CPUs
 * (TX CPU only exists on pre-5705 parts) while holding the NVRAM lock.
 */
11839 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
11840 err = tg3_nvram_lock(tp);
11841 tg3_halt_cpu(tp, RX_CPU_BASE);
11842 if (!tg3_flag(tp, 5705_PLUS))
11843 tg3_halt_cpu(tp, TX_CPU_BASE);
11845 tg3_nvram_unlock(tp);
11847 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
11850 if (tg3_test_registers(tp) != 0) {
11851 etest->flags |= ETH_TEST_FL_FAILED;
11855 if (tg3_test_memory(tp) != 0) {
11856 etest->flags |= ETH_TEST_FL_FAILED;
11861 etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
/* Loopback results land in data[4..6]; see tg3_test_loopback(). */
11863 if (tg3_test_loopback(tp, &data[4], doextlpbk))
11864 etest->flags |= ETH_TEST_FL_FAILED;
11866 tg3_full_unlock(tp);
11868 if (tg3_test_interrupt(tp) != 0) {
11869 etest->flags |= ETH_TEST_FL_FAILED;
11873 tg3_full_lock(tp, 0);
/* Shut down and, if the interface was up, bring the hardware back. */
11875 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11876 if (netif_running(dev)) {
11877 tg3_flag_set(tp, INIT_COMPLETE);
11878 err2 = tg3_restart_hw(tp, 1);
11880 tg3_netif_start(tp);
11883 tg3_full_unlock(tp);
11885 if (irq_sync && !err2)
/* Drop back to low power if that is where we started. */
11888 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11889 tg3_power_down(tp);
/* net_device ioctl handler for the MII ioctls (SIOCGMIIPHY/REG,
 * SIOCSMIIREG).  When phylib manages the PHY the request is forwarded
 * to phy_mii_ioctl(); otherwise the PHY registers are accessed directly
 * under tp->lock.  Returns -EOPNOTSUPP for unhandled commands.
 */
11893 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11895 struct mii_ioctl_data *data = if_mii(ifr);
11896 struct tg3 *tp = netdev_priv(dev);
11899 if (tg3_flag(tp, USE_PHYLIB)) {
11900 struct phy_device *phydev;
11901 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11903 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11904 return phy_mii_ioctl(phydev, ifr, cmd);
11909 data->phy_id = tp->phy_addr;
11912 case SIOCGMIIREG: {
11915 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11916 break; /* We have no PHY */
11918 if (!netif_running(dev))
/* Register reads are serialized against other PHY users by tp->lock. */
11921 spin_lock_bh(&tp->lock);
11922 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
11923 spin_unlock_bh(&tp->lock);
11925 data->val_out = mii_regval;
11931 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11932 break; /* We have no PHY */
11934 if (!netif_running(dev))
11937 spin_lock_bh(&tp->lock);
11938 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
11939 spin_unlock_bh(&tp->lock);
11947 return -EOPNOTSUPP;
/* ethtool .get_coalesce: report the cached coalescing parameters. */
11950 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11952 struct tg3 *tp = netdev_priv(dev);
11954 memcpy(ec, &tp->coal, sizeof(*ec));
/* ethtool .set_coalesce: validate the requested interrupt-coalescing
 * parameters against the chip limits, store the supported subset in
 * tp->coal, and program the hardware if the interface is running.
 */
11958 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11960 struct tg3 *tp = netdev_priv(dev);
11961 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
11962 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
/* Pre-5705 chips support IRQ tick-int and stats-block coalescing;
 * the limits stay zero on 5705+ so any nonzero request is rejected.
 */
11964 if (!tg3_flag(tp, 5705_PLUS)) {
11965 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
11966 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
11967 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
11968 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
11971 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
11972 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
11973 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
11974 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
11975 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
11976 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
11977 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
11978 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
11979 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
11980 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
11983 /* No rx interrupts will be generated if both are zero */
11984 if ((ec->rx_coalesce_usecs == 0) &&
11985 (ec->rx_max_coalesced_frames == 0))
11988 /* No tx interrupts will be generated if both are zero */
11989 if ((ec->tx_coalesce_usecs == 0) &&
11990 (ec->tx_max_coalesced_frames == 0))
11993 /* Only copy relevant parameters, ignore all others. */
11994 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
11995 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
11996 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
11997 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
11998 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
11999 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
12000 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
12001 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
12002 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
/* Push the new values to the hardware only while it is up. */
12004 if (netif_running(dev)) {
12005 tg3_full_lock(tp, 0);
12006 __tg3_set_coalesce(tp, &tp->coal);
12007 tg3_full_unlock(tp);
/* ethtool operations table wired into the net_device; each entry maps
 * an ethtool request to its tg3_* handler above.
 */
12012 static const struct ethtool_ops tg3_ethtool_ops = {
12013 .get_settings = tg3_get_settings,
12014 .set_settings = tg3_set_settings,
12015 .get_drvinfo = tg3_get_drvinfo,
12016 .get_regs_len = tg3_get_regs_len,
12017 .get_regs = tg3_get_regs,
12018 .get_wol = tg3_get_wol,
12019 .set_wol = tg3_set_wol,
12020 .get_msglevel = tg3_get_msglevel,
12021 .set_msglevel = tg3_set_msglevel,
12022 .nway_reset = tg3_nway_reset,
12023 .get_link = ethtool_op_get_link,
12024 .get_eeprom_len = tg3_get_eeprom_len,
12025 .get_eeprom = tg3_get_eeprom,
12026 .set_eeprom = tg3_set_eeprom,
12027 .get_ringparam = tg3_get_ringparam,
12028 .set_ringparam = tg3_set_ringparam,
12029 .get_pauseparam = tg3_get_pauseparam,
12030 .set_pauseparam = tg3_set_pauseparam,
12031 .self_test = tg3_self_test,
12032 .get_strings = tg3_get_strings,
12033 .set_phys_id = tg3_set_phys_id,
12034 .get_ethtool_stats = tg3_get_ethtool_stats,
12035 .get_coalesce = tg3_get_coalesce,
12036 .set_coalesce = tg3_set_coalesce,
12037 .get_sset_count = tg3_get_sset_count,
/* Determine the size of a seeprom-style part.  Starting from the
 * default EEPROM_CHIP_SIZE, reads at doubling offsets until the magic
 * signature at offset 0 reappears, which marks the address wraparound
 * and therefore the actual chip size (stored in tp->nvram_size).
 */
12040 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
12042 u32 cursize, val, magic;
12044 tp->nvram_size = EEPROM_CHIP_SIZE;
12046 if (tg3_nvram_read(tp, 0, &magic) != 0)
/* Only proceed for parts with a recognized magic signature. */
12049 if ((magic != TG3_EEPROM_MAGIC) &&
12050 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
12051 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
12055 * Size the chip by reading offsets at increasing powers of two.
12056 * When we encounter our validation signature, we know the addressing
12057 * has wrapped around, and thus have our chip size.
12061 while (cursize < tp->nvram_size) {
12062 if (tg3_nvram_read(tp, cursize, &val) != 0)
12071 tp->nvram_size = cursize;
/* Determine tp->nvram_size.  Selfboot-format parts (no standard magic)
 * are sized by tg3_get_eeprom_size(); otherwise the size field stored
 * at NVRAM offset 0xf0 is used, falling back to 512KB.
 */
12074 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
12078 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
12081 /* Selfboot format */
12082 if (val != TG3_EEPROM_MAGIC) {
12083 tg3_get_eeprom_size(tp);
12087 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
12089 /* This is confusing. We want to operate on the
12090 * 16-bit value at offset 0xf2. The tg3_nvram_read()
12091 * call will read from NVRAM and byteswap the data
12092 * according to the byteswapping settings for all
12093 * other register accesses. This ensures the data we
12094 * want will always reside in the lower 16-bits.
12095 * However, the data in NVRAM is in LE format, which
12096 * means the data from the NVRAM read will always be
12097 * opposite the endianness of the CPU. The 16-bit
12098 * byteswap then brings the data to CPU endianness.
12100 tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
/* No usable size field: assume the 512KB default. */
12104 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
/* Decode NVRAM_CFG1 for the original NVRAM interface (5750/5780-class
 * and earlier): record the JEDEC vendor, page size and buffered/flash
 * flags in *tp based on the vendor field strapping.
 */
12107 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
12111 nvcfg1 = tr32(NVRAM_CFG1);
12112 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
12113 tg3_flag_set(tp, FLASH);
/* Flash interface disabled: clear compat bypass so seeprom access
 * goes through the standard path.
 */
12115 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12116 tw32(NVRAM_CFG1, nvcfg1);
12119 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
12120 tg3_flag(tp, 5780_CLASS)) {
12121 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
12122 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
12123 tp->nvram_jedecnum = JEDEC_ATMEL;
12124 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
12125 tg3_flag_set(tp, NVRAM_BUFFERED);
12127 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
12128 tp->nvram_jedecnum = JEDEC_ATMEL;
12129 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
12131 case FLASH_VENDOR_ATMEL_EEPROM:
12132 tp->nvram_jedecnum = JEDEC_ATMEL;
12133 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12134 tg3_flag_set(tp, NVRAM_BUFFERED);
12136 case FLASH_VENDOR_ST:
12137 tp->nvram_jedecnum = JEDEC_ST;
12138 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
12139 tg3_flag_set(tp, NVRAM_BUFFERED);
12141 case FLASH_VENDOR_SAIFUN:
12142 tp->nvram_jedecnum = JEDEC_SAIFUN;
12143 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
12145 case FLASH_VENDOR_SST_SMALL:
12146 case FLASH_VENDOR_SST_LARGE:
12147 tp->nvram_jedecnum = JEDEC_SST;
12148 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
/* Non-5750/5780-class default: buffered Atmel AT45DB0X1B. */
12152 tp->nvram_jedecnum = JEDEC_ATMEL;
12153 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
12154 tg3_flag_set(tp, NVRAM_BUFFERED);
/* Translate the 5752-style page-size field of NVRAM_CFG1 into a byte
 * count stored in tp->nvram_pagesize.
 */
12158 static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
12160 switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
12161 case FLASH_5752PAGE_SIZE_256:
12162 tp->nvram_pagesize = 256;
12164 case FLASH_5752PAGE_SIZE_512:
12165 tp->nvram_pagesize = 512;
12167 case FLASH_5752PAGE_SIZE_1K:
12168 tp->nvram_pagesize = 1024;
12170 case FLASH_5752PAGE_SIZE_2K:
12171 tp->nvram_pagesize = 2048;
12173 case FLASH_5752PAGE_SIZE_4K:
12174 tp->nvram_pagesize = 4096;
12176 case FLASH_5752PAGE_SIZE_264:
12177 tp->nvram_pagesize = 264;
12179 case FLASH_5752PAGE_SIZE_528:
12180 tp->nvram_pagesize = 528;
/* Decode NVRAM_CFG1 for 5752-class chips: vendor, buffered/flash flags,
 * TPM write protection, and page size.
 */
12185 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
12189 nvcfg1 = tr32(NVRAM_CFG1);
12191 /* NVRAM protection for TPM */
12192 if (nvcfg1 & (1 << 27))
12193 tg3_flag_set(tp, PROTECTED_NVRAM);
12195 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12196 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
12197 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
12198 tp->nvram_jedecnum = JEDEC_ATMEL;
12199 tg3_flag_set(tp, NVRAM_BUFFERED);
12201 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12202 tp->nvram_jedecnum = JEDEC_ATMEL;
12203 tg3_flag_set(tp, NVRAM_BUFFERED);
12204 tg3_flag_set(tp, FLASH);
12206 case FLASH_5752VENDOR_ST_M45PE10:
12207 case FLASH_5752VENDOR_ST_M45PE20:
12208 case FLASH_5752VENDOR_ST_M45PE40:
12209 tp->nvram_jedecnum = JEDEC_ST;
12210 tg3_flag_set(tp, NVRAM_BUFFERED);
12211 tg3_flag_set(tp, FLASH);
12215 if (tg3_flag(tp, FLASH)) {
12216 tg3_nvram_get_pagesize(tp, nvcfg1);
12218 /* For eeprom, set pagesize to maximum eeprom size */
12219 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
/* EEPROM path also clears compat bypass in NVRAM_CFG1. */
12221 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12222 tw32(NVRAM_CFG1, nvcfg1);
/* Decode NVRAM_CFG1 for 5755-class chips.  In addition to vendor and
 * page size, the total NVRAM size is derived per part, reduced when TPM
 * write protection is active.
 */
12226 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
12228 u32 nvcfg1, protect = 0;
12230 nvcfg1 = tr32(NVRAM_CFG1);
12232 /* NVRAM protection for TPM */
12233 if (nvcfg1 & (1 << 27)) {
12234 tg3_flag_set(tp, PROTECTED_NVRAM);
12238 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
12240 case FLASH_5755VENDOR_ATMEL_FLASH_1:
12241 case FLASH_5755VENDOR_ATMEL_FLASH_2:
12242 case FLASH_5755VENDOR_ATMEL_FLASH_3:
12243 case FLASH_5755VENDOR_ATMEL_FLASH_5:
12244 tp->nvram_jedecnum = JEDEC_ATMEL;
12245 tg3_flag_set(tp, NVRAM_BUFFERED);
12246 tg3_flag_set(tp, FLASH);
12247 tp->nvram_pagesize = 264;
/* Per-part sizes; protected parts report the reduced usable size. */
12248 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
12249 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
12250 tp->nvram_size = (protect ? 0x3e200 :
12251 TG3_NVRAM_SIZE_512KB);
12252 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
12253 tp->nvram_size = (protect ? 0x1f200 :
12254 TG3_NVRAM_SIZE_256KB);
12256 tp->nvram_size = (protect ? 0x1f200 :
12257 TG3_NVRAM_SIZE_128KB);
12259 case FLASH_5752VENDOR_ST_M45PE10:
12260 case FLASH_5752VENDOR_ST_M45PE20:
12261 case FLASH_5752VENDOR_ST_M45PE40:
12262 tp->nvram_jedecnum = JEDEC_ST;
12263 tg3_flag_set(tp, NVRAM_BUFFERED);
12264 tg3_flag_set(tp, FLASH);
12265 tp->nvram_pagesize = 256;
12266 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
12267 tp->nvram_size = (protect ?
12268 TG3_NVRAM_SIZE_64KB :
12269 TG3_NVRAM_SIZE_128KB);
12270 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
12271 tp->nvram_size = (protect ?
12272 TG3_NVRAM_SIZE_64KB :
12273 TG3_NVRAM_SIZE_256KB);
12275 tp->nvram_size = (protect ?
12276 TG3_NVRAM_SIZE_128KB :
12277 TG3_NVRAM_SIZE_512KB);
/* Decode NVRAM_CFG1 for 5787/5784/5785-class chips: vendor, page size
 * and buffered/flash flags.
 */
12282 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
12286 nvcfg1 = tr32(NVRAM_CFG1);
12288 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12289 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
12290 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
12291 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
12292 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
12293 tp->nvram_jedecnum = JEDEC_ATMEL;
12294 tg3_flag_set(tp, NVRAM_BUFFERED);
12295 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
/* EEPROM parts: disable compat bypass for standard seeprom access. */
12297 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12298 tw32(NVRAM_CFG1, nvcfg1);
12300 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12301 case FLASH_5755VENDOR_ATMEL_FLASH_1:
12302 case FLASH_5755VENDOR_ATMEL_FLASH_2:
12303 case FLASH_5755VENDOR_ATMEL_FLASH_3:
12304 tp->nvram_jedecnum = JEDEC_ATMEL;
12305 tg3_flag_set(tp, NVRAM_BUFFERED);
12306 tg3_flag_set(tp, FLASH);
12307 tp->nvram_pagesize = 264;
12309 case FLASH_5752VENDOR_ST_M45PE10:
12310 case FLASH_5752VENDOR_ST_M45PE20:
12311 case FLASH_5752VENDOR_ST_M45PE40:
12312 tp->nvram_jedecnum = JEDEC_ST;
12313 tg3_flag_set(tp, NVRAM_BUFFERED);
12314 tg3_flag_set(tp, FLASH);
12315 tp->nvram_pagesize = 256;
/* Decode NVRAM_CFG1 for 5761-class chips.  Sets vendor/page/flags and
 * then derives the NVRAM size, first from NVRAM_ADDR_LOCKOUT and then
 * from the specific flash part identified.
 */
12320 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
12322 u32 nvcfg1, protect = 0;
12324 nvcfg1 = tr32(NVRAM_CFG1);
12326 /* NVRAM protection for TPM */
12327 if (nvcfg1 & (1 << 27)) {
12328 tg3_flag_set(tp, PROTECTED_NVRAM);
12332 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
12334 case FLASH_5761VENDOR_ATMEL_ADB021D:
12335 case FLASH_5761VENDOR_ATMEL_ADB041D:
12336 case FLASH_5761VENDOR_ATMEL_ADB081D:
12337 case FLASH_5761VENDOR_ATMEL_ADB161D:
12338 case FLASH_5761VENDOR_ATMEL_MDB021D:
12339 case FLASH_5761VENDOR_ATMEL_MDB041D:
12340 case FLASH_5761VENDOR_ATMEL_MDB081D:
12341 case FLASH_5761VENDOR_ATMEL_MDB161D:
12342 tp->nvram_jedecnum = JEDEC_ATMEL;
12343 tg3_flag_set(tp, NVRAM_BUFFERED);
12344 tg3_flag_set(tp, FLASH);
/* Atmel parts here use linear addressing (no 264-byte translation). */
12345 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12346 tp->nvram_pagesize = 256;
12348 case FLASH_5761VENDOR_ST_A_M45PE20:
12349 case FLASH_5761VENDOR_ST_A_M45PE40:
12350 case FLASH_5761VENDOR_ST_A_M45PE80:
12351 case FLASH_5761VENDOR_ST_A_M45PE16:
12352 case FLASH_5761VENDOR_ST_M_M45PE20:
12353 case FLASH_5761VENDOR_ST_M_M45PE40:
12354 case FLASH_5761VENDOR_ST_M_M45PE80:
12355 case FLASH_5761VENDOR_ST_M_M45PE16:
12356 tp->nvram_jedecnum = JEDEC_ST;
12357 tg3_flag_set(tp, NVRAM_BUFFERED);
12358 tg3_flag_set(tp, FLASH);
12359 tp->nvram_pagesize = 256;
/* Preferred size source is the address-lockout register. */
12364 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
12367 case FLASH_5761VENDOR_ATMEL_ADB161D:
12368 case FLASH_5761VENDOR_ATMEL_MDB161D:
12369 case FLASH_5761VENDOR_ST_A_M45PE16:
12370 case FLASH_5761VENDOR_ST_M_M45PE16:
12371 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
12373 case FLASH_5761VENDOR_ATMEL_ADB081D:
12374 case FLASH_5761VENDOR_ATMEL_MDB081D:
12375 case FLASH_5761VENDOR_ST_A_M45PE80:
12376 case FLASH_5761VENDOR_ST_M_M45PE80:
12377 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12379 case FLASH_5761VENDOR_ATMEL_ADB041D:
12380 case FLASH_5761VENDOR_ATMEL_MDB041D:
12381 case FLASH_5761VENDOR_ST_A_M45PE40:
12382 case FLASH_5761VENDOR_ST_M_M45PE40:
12383 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12385 case FLASH_5761VENDOR_ATMEL_ADB021D:
12386 case FLASH_5761VENDOR_ATMEL_MDB021D:
12387 case FLASH_5761VENDOR_ST_A_M45PE20:
12388 case FLASH_5761VENDOR_ST_M_M45PE20:
12389 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
/* 5906: fixed configuration — buffered Atmel AT24C512 seeprom. */
12395 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
12397 tp->nvram_jedecnum = JEDEC_ATMEL;
12398 tg3_flag_set(tp, NVRAM_BUFFERED);
12399 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
/* Decode NVRAM_CFG1 for 57780/57765-class chips: vendor, flags and a
 * per-part NVRAM size.  Unknown vendor codes mark the device NO_NVRAM.
 * Flash parts with non-264/528-byte pages use linear addressing.
 */
12402 static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
12406 nvcfg1 = tr32(NVRAM_CFG1);
12408 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12409 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
12410 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
12411 tp->nvram_jedecnum = JEDEC_ATMEL;
12412 tg3_flag_set(tp, NVRAM_BUFFERED);
12413 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12415 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12416 tw32(NVRAM_CFG1, nvcfg1);
12418 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12419 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
12420 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
12421 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
12422 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
12423 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12424 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12425 tp->nvram_jedecnum = JEDEC_ATMEL;
12426 tg3_flag_set(tp, NVRAM_BUFFERED);
12427 tg3_flag_set(tp, FLASH);
/* Inner switch picks the size for the specific Atmel part. */
12429 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12430 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12431 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
12432 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
12433 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12435 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
12436 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
12437 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12439 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12440 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12441 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12445 case FLASH_5752VENDOR_ST_M45PE10:
12446 case FLASH_5752VENDOR_ST_M45PE20:
12447 case FLASH_5752VENDOR_ST_M45PE40:
12448 tp->nvram_jedecnum = JEDEC_ST;
12449 tg3_flag_set(tp, NVRAM_BUFFERED);
12450 tg3_flag_set(tp, FLASH);
12452 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12453 case FLASH_5752VENDOR_ST_M45PE10:
12454 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12456 case FLASH_5752VENDOR_ST_M45PE20:
12457 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12459 case FLASH_5752VENDOR_ST_M45PE40:
12460 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
/* Unrecognized strapping: no usable NVRAM. */
12465 tg3_flag_set(tp, NO_NVRAM);
12469 tg3_nvram_get_pagesize(tp, nvcfg1);
12470 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12471 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
/* Decode NVRAM_CFG1 for 5717/5719-class chips: vendor, flags and size.
 * Some parts defer sizing to tg3_nvram_get_size(); unknown vendor codes
 * mark the device NO_NVRAM.
 */
12475 static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
12479 nvcfg1 = tr32(NVRAM_CFG1);
12481 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12482 case FLASH_5717VENDOR_ATMEL_EEPROM:
12483 case FLASH_5717VENDOR_MICRO_EEPROM:
12484 tp->nvram_jedecnum = JEDEC_ATMEL;
12485 tg3_flag_set(tp, NVRAM_BUFFERED);
12486 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12488 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12489 tw32(NVRAM_CFG1, nvcfg1);
12491 case FLASH_5717VENDOR_ATMEL_MDB011D:
12492 case FLASH_5717VENDOR_ATMEL_ADB011B:
12493 case FLASH_5717VENDOR_ATMEL_ADB011D:
12494 case FLASH_5717VENDOR_ATMEL_MDB021D:
12495 case FLASH_5717VENDOR_ATMEL_ADB021B:
12496 case FLASH_5717VENDOR_ATMEL_ADB021D:
12497 case FLASH_5717VENDOR_ATMEL_45USPT:
12498 tp->nvram_jedecnum = JEDEC_ATMEL;
12499 tg3_flag_set(tp, NVRAM_BUFFERED);
12500 tg3_flag_set(tp, FLASH);
12502 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12503 case FLASH_5717VENDOR_ATMEL_MDB021D:
12504 /* Detect size with tg3_nvram_get_size() */
12506 case FLASH_5717VENDOR_ATMEL_ADB021B:
12507 case FLASH_5717VENDOR_ATMEL_ADB021D:
12508 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12511 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12515 case FLASH_5717VENDOR_ST_M_M25PE10:
12516 case FLASH_5717VENDOR_ST_A_M25PE10:
12517 case FLASH_5717VENDOR_ST_M_M45PE10:
12518 case FLASH_5717VENDOR_ST_A_M45PE10:
12519 case FLASH_5717VENDOR_ST_M_M25PE20:
12520 case FLASH_5717VENDOR_ST_A_M25PE20:
12521 case FLASH_5717VENDOR_ST_M_M45PE20:
12522 case FLASH_5717VENDOR_ST_A_M45PE20:
12523 case FLASH_5717VENDOR_ST_25USPT:
12524 case FLASH_5717VENDOR_ST_45USPT:
12525 tp->nvram_jedecnum = JEDEC_ST;
12526 tg3_flag_set(tp, NVRAM_BUFFERED);
12527 tg3_flag_set(tp, FLASH);
12529 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12530 case FLASH_5717VENDOR_ST_M_M25PE20:
12531 case FLASH_5717VENDOR_ST_M_M45PE20:
12532 /* Detect size with tg3_nvram_get_size() */
12534 case FLASH_5717VENDOR_ST_A_M25PE20:
12535 case FLASH_5717VENDOR_ST_A_M45PE20:
12536 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12539 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
/* Unrecognized strapping: no usable NVRAM. */
12544 tg3_flag_set(tp, NO_NVRAM);
12548 tg3_nvram_get_pagesize(tp, nvcfg1);
/* Linear addressing unless the part uses 264/528-byte pages. */
12549 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12550 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
/* Decode NVRAM_CFG1 for 5720-class chips: the pin-strap vendor code
 * selects EEPROM vs. Atmel/ST flash, and per-part sizes are assigned.
 * Unknown strappings mark the device NO_NVRAM.
 */
12553 static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
12555 u32 nvcfg1, nvmpinstrp;
12557 nvcfg1 = tr32(NVRAM_CFG1);
12558 nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
12560 switch (nvmpinstrp) {
12561 case FLASH_5720_EEPROM_HD:
12562 case FLASH_5720_EEPROM_LD:
12563 tp->nvram_jedecnum = JEDEC_ATMEL;
12564 tg3_flag_set(tp, NVRAM_BUFFERED);
12566 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12567 tw32(NVRAM_CFG1, nvcfg1);
/* HD = high-density (AT24C512) vs LD = low-density (AT24C02). */
12568 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
12569 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12571 tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
12573 case FLASH_5720VENDOR_M_ATMEL_DB011D:
12574 case FLASH_5720VENDOR_A_ATMEL_DB011B:
12575 case FLASH_5720VENDOR_A_ATMEL_DB011D:
12576 case FLASH_5720VENDOR_M_ATMEL_DB021D:
12577 case FLASH_5720VENDOR_A_ATMEL_DB021B:
12578 case FLASH_5720VENDOR_A_ATMEL_DB021D:
12579 case FLASH_5720VENDOR_M_ATMEL_DB041D:
12580 case FLASH_5720VENDOR_A_ATMEL_DB041B:
12581 case FLASH_5720VENDOR_A_ATMEL_DB041D:
12582 case FLASH_5720VENDOR_M_ATMEL_DB081D:
12583 case FLASH_5720VENDOR_A_ATMEL_DB081D:
12584 case FLASH_5720VENDOR_ATMEL_45USPT:
12585 tp->nvram_jedecnum = JEDEC_ATMEL;
12586 tg3_flag_set(tp, NVRAM_BUFFERED);
12587 tg3_flag_set(tp, FLASH);
/* Size by specific Atmel part; 128KB default for the rest. */
12589 switch (nvmpinstrp) {
12590 case FLASH_5720VENDOR_M_ATMEL_DB021D:
12591 case FLASH_5720VENDOR_A_ATMEL_DB021B:
12592 case FLASH_5720VENDOR_A_ATMEL_DB021D:
12593 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12595 case FLASH_5720VENDOR_M_ATMEL_DB041D:
12596 case FLASH_5720VENDOR_A_ATMEL_DB041B:
12597 case FLASH_5720VENDOR_A_ATMEL_DB041D:
12598 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12600 case FLASH_5720VENDOR_M_ATMEL_DB081D:
12601 case FLASH_5720VENDOR_A_ATMEL_DB081D:
12602 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12605 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12609 case FLASH_5720VENDOR_M_ST_M25PE10:
12610 case FLASH_5720VENDOR_M_ST_M45PE10:
12611 case FLASH_5720VENDOR_A_ST_M25PE10:
12612 case FLASH_5720VENDOR_A_ST_M45PE10:
12613 case FLASH_5720VENDOR_M_ST_M25PE20:
12614 case FLASH_5720VENDOR_M_ST_M45PE20:
12615 case FLASH_5720VENDOR_A_ST_M25PE20:
12616 case FLASH_5720VENDOR_A_ST_M45PE20:
12617 case FLASH_5720VENDOR_M_ST_M25PE40:
12618 case FLASH_5720VENDOR_M_ST_M45PE40:
12619 case FLASH_5720VENDOR_A_ST_M25PE40:
12620 case FLASH_5720VENDOR_A_ST_M45PE40:
12621 case FLASH_5720VENDOR_M_ST_M25PE80:
12622 case FLASH_5720VENDOR_M_ST_M45PE80:
12623 case FLASH_5720VENDOR_A_ST_M25PE80:
12624 case FLASH_5720VENDOR_A_ST_M45PE80:
12625 case FLASH_5720VENDOR_ST_25USPT:
12626 case FLASH_5720VENDOR_ST_45USPT:
12627 tp->nvram_jedecnum = JEDEC_ST;
12628 tg3_flag_set(tp, NVRAM_BUFFERED);
12629 tg3_flag_set(tp, FLASH);
12631 switch (nvmpinstrp) {
12632 case FLASH_5720VENDOR_M_ST_M25PE20:
12633 case FLASH_5720VENDOR_M_ST_M45PE20:
12634 case FLASH_5720VENDOR_A_ST_M25PE20:
12635 case FLASH_5720VENDOR_A_ST_M45PE20:
12636 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12638 case FLASH_5720VENDOR_M_ST_M25PE40:
12639 case FLASH_5720VENDOR_M_ST_M45PE40:
12640 case FLASH_5720VENDOR_A_ST_M25PE40:
12641 case FLASH_5720VENDOR_A_ST_M45PE40:
12642 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12644 case FLASH_5720VENDOR_M_ST_M25PE80:
12645 case FLASH_5720VENDOR_M_ST_M45PE80:
12646 case FLASH_5720VENDOR_A_ST_M25PE80:
12647 case FLASH_5720VENDOR_A_ST_M45PE80:
12648 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12651 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
/* Unrecognized strapping: no usable NVRAM. */
12656 tg3_flag_set(tp, NO_NVRAM);
12660 tg3_nvram_get_pagesize(tp, nvcfg1);
12661 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12662 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12665 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
/* Probe and configure the NVRAM interface at device init: reset the
 * seeprom FSM, enable accesses, then dispatch to the per-ASIC
 * *_nvram_info() decoder and size the part.  5700/5701 have no NVRAM
 * block and fall back to direct EEPROM sizing.
 */
12666 static void __devinit tg3_nvram_init(struct tg3 *tp)
12668 tw32_f(GRC_EEPROM_ADDR,
12669 (EEPROM_ADDR_FSM_RESET |
12670 (EEPROM_DEFAULT_CLOCK_PERIOD <<
12671 EEPROM_ADDR_CLKPERD_SHIFT)));
12675 /* Enable seeprom accesses. */
12676 tw32_f(GRC_LOCAL_CTRL,
12677 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
12680 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12681 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
12682 tg3_flag_set(tp, NVRAM);
12684 if (tg3_nvram_lock(tp)) {
12685 netdev_warn(tp->dev,
12686 "Cannot get nvram lock, %s failed\n",
12690 tg3_enable_nvram_access(tp);
/* Zero means "not yet known"; sized below if the decoder left it 0. */
12692 tp->nvram_size = 0;
12694 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
12695 tg3_get_5752_nvram_info(tp);
12696 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12697 tg3_get_5755_nvram_info(tp);
12698 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12699 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12700 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12701 tg3_get_5787_nvram_info(tp);
12702 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
12703 tg3_get_5761_nvram_info(tp);
12704 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12705 tg3_get_5906_nvram_info(tp);
12706 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
12707 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
12708 tg3_get_57780_nvram_info(tp);
12709 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
12710 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
12711 tg3_get_5717_nvram_info(tp);
12712 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
12713 tg3_get_5720_nvram_info(tp);
12715 tg3_get_nvram_info(tp);
12717 if (tp->nvram_size == 0)
12718 tg3_get_nvram_size(tp);
12720 tg3_disable_nvram_access(tp);
12721 tg3_nvram_unlock(tp);
/* 5700/5701 path: no NVRAM block, use the EEPROM directly. */
12724 tg3_flag_clear(tp, NVRAM);
12725 tg3_flag_clear(tp, NVRAM_BUFFERED);
12727 tg3_get_eeprom_size(tp);
/* Write len bytes at offset through the GRC seeprom interface, one
 * 32-bit word at a time, polling EEPROM_ADDR_COMPLETE after each word.
 * buf holds big-endian words (see the byte-order comment below).
 */
12731 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
12732 u32 offset, u32 len, u8 *buf)
12737 for (i = 0; i < len; i += 4) {
12743 memcpy(&data, buf + i, 4);
12746 * The SEEPROM interface expects the data to always be opposite
12747 * the native endian format. We accomplish this by reversing
12748 * all the operations that would have been performed on the
12749 * data from a call to tg3_nvram_read_be32().
12751 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
12753 val = tr32(GRC_EEPROM_ADDR);
/* Writing COMPLETE back clears any stale completion status. */
12754 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
12756 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
12758 tw32(GRC_EEPROM_ADDR, val |
12759 (0 << EEPROM_ADDR_DEVID_SHIFT) |
12760 (addr & EEPROM_ADDR_ADDR_MASK) |
12761 EEPROM_ADDR_START |
12762 EEPROM_ADDR_WRITE);
/* Poll (bounded) for the write to complete. */
12764 for (j = 0; j < 1000; j++) {
12765 val = tr32(GRC_EEPROM_ADDR);
12767 if (val & EEPROM_ADDR_COMPLETE)
/* Timed out: the word never completed. */
12771 if (!(val & EEPROM_ADDR_COMPLETE)) {
12780 /* offset and length are dword aligned */
/* Write to unbuffered flash via read-modify-write of whole pages:
 * read the enclosing page into a temp buffer, merge the caller's data,
 * erase the page, then stream it back with FIRST/LAST framing.
 * Returns 0 or a negative errno from the NVRAM command engine.
 */
12781 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
12785 u32 pagesize = tp->nvram_pagesize;
12786 u32 pagemask = pagesize - 1;
12790 tmp = kmalloc(pagesize, GFP_KERNEL);
12796 u32 phy_addr, page_off, size;
/* Page-aligned base of the page containing 'offset'. */
12798 phy_addr = offset & ~pagemask;
12800 for (j = 0; j < pagesize; j += 4) {
12801 ret = tg3_nvram_read_be32(tp, phy_addr + j,
12802 (__be32 *) (tmp + j));
12809 page_off = offset & pagemask;
/* Merge the caller's data into the page image. */
12816 memcpy(tmp + page_off, buf, size);
12818 offset = offset + (pagesize - page_off);
12820 tg3_enable_nvram_access(tp);
12823 * Before we can erase the flash page, we need
12824 * to issue a special "write enable" command.
12826 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12828 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12831 /* Erase the target page */
12832 tw32(NVRAM_ADDR, phy_addr);
12834 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
12835 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
12837 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12840 /* Issue another write enable to start the write. */
12841 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12843 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
/* Write the merged page image back, one dword per command. */
12846 for (j = 0; j < pagesize; j += 4) {
12849 data = *((__be32 *) (tmp + j));
12851 tw32(NVRAM_WRDATA, be32_to_cpu(data));
12853 tw32(NVRAM_ADDR, phy_addr + j);
12855 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
/* FIRST on the first dword of the page, LAST on the final one. */
12859 nvram_cmd |= NVRAM_CMD_FIRST;
12860 else if (j == (pagesize - 4))
12861 nvram_cmd |= NVRAM_CMD_LAST;
12863 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
/* Write-disable when finished. */
12870 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12871 tg3_nvram_exec_cmd(tp, nvram_cmd);
12878 /* offset and length are dword aligned */
/* Write to buffered flash / eeprom: one dword per NVRAM command, with
 * FIRST/LAST framing at page boundaries (eeprom always gets both).
 * ST parts on pre-5755 chips need an explicit write-enable before each
 * FIRST command.  Returns 0 or the command-engine error.
 */
12879 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
12884 for (i = 0; i < len; i += 4, offset += 4) {
12885 u32 page_off, phy_addr, nvram_cmd;
12888 memcpy(&data, buf + i, 4);
12889 tw32(NVRAM_WRDATA, be32_to_cpu(data));
12891 page_off = offset % tp->nvram_pagesize;
/* Translate to the device's physical addressing scheme. */
12893 phy_addr = tg3_nvram_phys_addr(tp, offset);
12895 tw32(NVRAM_ADDR, phy_addr);
12897 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
12899 if (page_off == 0 || i == 0)
12900 nvram_cmd |= NVRAM_CMD_FIRST;
12901 if (page_off == (tp->nvram_pagesize - 4))
12902 nvram_cmd |= NVRAM_CMD_LAST;
/* Final dword of the request is always a LAST. */
12904 if (i == (len - 4))
12905 nvram_cmd |= NVRAM_CMD_LAST;
12907 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
12908 !tg3_flag(tp, 5755_PLUS) &&
12909 (tp->nvram_jedecnum == JEDEC_ST) &&
12910 (nvram_cmd & NVRAM_CMD_FIRST)) {
12912 if ((ret = tg3_nvram_exec_cmd(tp,
12913 NVRAM_CMD_WREN | NVRAM_CMD_GO |
12918 if (!tg3_flag(tp, FLASH)) {
12919 /* We always do complete word writes to eeprom. */
12920 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
12923 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
12929 /* offset and length are dword aligned */
/* Top-level NVRAM write dispatcher.  Temporarily drops the EEPROM
 * write-protect GPIO (if set), takes the NVRAM hardware lock, enables
 * write access via GRC_MODE, and routes the transfer to the
 * eeprom/buffered/unbuffered helper that matches the part type.
 * Write protection and the lock are restored afterwards.
 */
12930 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
/* Deassert the write-protect GPIO for the duration of the write. */
12934 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
12935 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
12936 ~GRC_LCLCTRL_GPIO_OUTPUT1);
/* Parts without NVRAM interface use the legacy EEPROM access path. */
12940 if (!tg3_flag(tp, NVRAM)) {
12941 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
12945 ret = tg3_nvram_lock(tp);
12949 tg3_enable_nvram_access(tp);
12950 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
12951 tw32(NVRAM_WRITE1, 0x406);
/* Enable NVRAM writes in GRC_MODE for the duration of the transfer. */
12953 grc_mode = tr32(GRC_MODE);
12954 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
12956 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
12957 ret = tg3_nvram_write_block_buffered(tp, offset, len,
12960 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
/* Restore write-disable and release access/lock. */
12964 grc_mode = tr32(GRC_MODE);
12965 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
12967 tg3_disable_nvram_access(tp);
12968 tg3_nvram_unlock(tp);
/* Re-assert write protection if it was enabled on entry. */
12971 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
12972 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
/* Maps a (subsystem vendor, subsystem device) pair to the PHY ID known to
 * be fitted on that board.  Used as a fallback when the EEPROM carries no
 * valid signature (see tg3_lookup_by_subsys / tg3_phy_probe).  A phy_id
 * of 0 means the entry identifies the board but not a specific PHY.
 */
12979 struct subsys_tbl_ent {
12980 u16 subsys_vendor, subsys_devid;
12984 static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
12985 /* Broadcom boards. */
12986 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12987 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
12988 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12989 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
12990 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12991 TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
12992 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12993 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
12994 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12995 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
12996 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12997 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
12998 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12999 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
13000 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13001 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
13002 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13003 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
13004 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13005 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
13006 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13007 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
/* 3com boards. */
13010 { TG3PCI_SUBVENDOR_ID_3COM,
13011 TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
13012 { TG3PCI_SUBVENDOR_ID_3COM,
13013 TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
13014 { TG3PCI_SUBVENDOR_ID_3COM,
13015 TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
13016 { TG3PCI_SUBVENDOR_ID_3COM,
13017 TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
13018 { TG3PCI_SUBVENDOR_ID_3COM,
13019 TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
/* DELL boards. */
13022 { TG3PCI_SUBVENDOR_ID_DELL,
13023 TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
13024 { TG3PCI_SUBVENDOR_ID_DELL,
13025 TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
13026 { TG3PCI_SUBVENDOR_ID_DELL,
13027 TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
13028 { TG3PCI_SUBVENDOR_ID_DELL,
13029 TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
13031 /* Compaq boards. */
13032 { TG3PCI_SUBVENDOR_ID_COMPAQ,
13033 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
13034 { TG3PCI_SUBVENDOR_ID_COMPAQ,
13035 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
13036 { TG3PCI_SUBVENDOR_ID_COMPAQ,
13037 TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
13038 { TG3PCI_SUBVENDOR_ID_COMPAQ,
13039 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
13040 { TG3PCI_SUBVENDOR_ID_COMPAQ,
13041 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
/* IBM boards. */
13044 { TG3PCI_SUBVENDOR_ID_IBM,
13045 TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
/* Linear search of subsys_id_to_phy_id[] for an entry matching this
 * device's PCI subsystem vendor/device IDs.  Returns the matching entry,
 * or (per the elided tail) presumably NULL when no entry matches —
 * callers in tg3_phy_probe check the result before use.
 */
13048 static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
13052 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
13053 if ((subsys_id_to_phy_id[i].subsys_vendor ==
13054 tp->pdev->subsystem_vendor) &&
13055 (subsys_id_to_phy_id[i].subsys_devid ==
13056 tp->pdev->subsystem_device))
13057 return &subsys_id_to_phy_id[i];
/* Parse the NIC-configuration words exposed through chip SRAM (written by
 * bootcode/EEPROM) and translate them into driver state: PHY ID, LED
 * control mode, write-protect / NIC-vs-LOM flags, ASF/APE enables, WOL
 * capability, serdes flags, and ASPM/RGMII workaround flags.  Falls back
 * to conservative defaults when no valid SRAM signature is present.
 * NOTE(review): several original lines (closing braces, declarations)
 * are elided in this extract; comments describe only visible code.
 */
13062 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
/* Conservative defaults until the SRAM config proves otherwise. */
13066 tp->phy_id = TG3_PHY_ID_INVALID;
13067 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13069 /* Assume an onboard device and WOL capable by default. */
13070 tg3_flag_set(tp, EEPROM_WRITE_PROT);
13071 tg3_flag_set(tp, WOL_CAP);
/* 5906: config lives in the VCPU shadow register, not NIC SRAM. */
13073 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13074 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
13075 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13076 tg3_flag_set(tp, IS_NIC);
13078 val = tr32(VCPU_CFGSHDW);
13079 if (val & VCPU_CFGSHDW_ASPM_DBNC)
13080 tg3_flag_set(tp, ASPM_WORKAROUND);
13081 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
13082 (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
13083 tg3_flag_set(tp, WOL_ENABLE);
13084 device_set_wakeup_enable(&tp->pdev->dev, true);
/* All other chips: validate the SRAM data signature first. */
13089 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
13090 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
13091 u32 nic_cfg, led_cfg;
13092 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
13093 int eeprom_phy_serdes = 0;
13095 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
13096 tp->nic_sram_data_cfg = nic_cfg;
/* CFG_2 only exists on some chips/bootcode versions (0 < ver < 0x100). */
13098 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
13099 ver >>= NIC_SRAM_DATA_VER_SHIFT;
13100 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13101 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13102 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
13103 (ver > 0) && (ver < 0x100))
13104 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
13106 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
13107 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
13109 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
13110 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
13111 eeprom_phy_serdes = 1;
/* Reassemble the PHY ID from the SRAM-packed id1/id2 halves. */
13113 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
13114 if (nic_phy_id != 0) {
13115 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
13116 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
13118 eeprom_phy_id = (id1 >> 16) << 10;
13119 eeprom_phy_id |= (id2 & 0xfc00) << 16;
13120 eeprom_phy_id |= (id2 & 0x03ff) << 0;
13124 tp->phy_id = eeprom_phy_id;
13125 if (eeprom_phy_serdes) {
13126 if (!tg3_flag(tp, 5705_PLUS))
13127 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13129 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
/* LED mode comes from CFG_2 on 5750+, CFG otherwise. */
13132 if (tg3_flag(tp, 5750_PLUS))
13133 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
13134 SHASTA_EXT_LED_MODE_MASK);
13136 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
13140 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
13141 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13144 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
13145 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
13148 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
13149 tp->led_ctrl = LED_CTRL_MODE_MAC;
13151 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
13152 * read on some older 5700/5701 bootcode.
13154 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
13156 GET_ASIC_REV(tp->pci_chip_rev_id) ==
13158 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13162 case SHASTA_EXT_LED_SHARED:
13163 tp->led_ctrl = LED_CTRL_MODE_SHARED;
13164 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
13165 tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
13166 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
13167 LED_CTRL_MODE_PHY_2);
13170 case SHASTA_EXT_LED_MAC:
13171 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
13174 case SHASTA_EXT_LED_COMBO:
13175 tp->led_ctrl = LED_CTRL_MODE_COMBO;
13176 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
13177 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
13178 LED_CTRL_MODE_PHY_2);
/* Board-specific LED overrides (Dell 5700/5701, 5784 AX). */
13183 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13184 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
13185 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
13186 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
13188 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
13189 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13191 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
13192 tg3_flag_set(tp, EEPROM_WRITE_PROT);
/* Specific Arima boards lie about write-protect; clear it. */
13193 if ((tp->pdev->subsystem_vendor ==
13194 PCI_VENDOR_ID_ARIMA) &&
13195 (tp->pdev->subsystem_device == 0x205a ||
13196 tp->pdev->subsystem_device == 0x2063))
13197 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13199 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13200 tg3_flag_set(tp, IS_NIC);
13203 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
13204 tg3_flag_set(tp, ENABLE_ASF);
13205 if (tg3_flag(tp, 5750_PLUS))
13206 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
13209 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
13210 tg3_flag(tp, 5750_PLUS))
13211 tg3_flag_set(tp, ENABLE_APE);
/* Serdes boards without fiber-WOL support cannot wake the host. */
13213 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
13214 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
13215 tg3_flag_clear(tp, WOL_CAP);
13217 if (tg3_flag(tp, WOL_CAP) &&
13218 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
13219 tg3_flag_set(tp, WOL_ENABLE);
13220 device_set_wakeup_enable(&tp->pdev->dev, true);
13223 if (cfg2 & (1 << 17))
13224 tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
13226 /* serdes signal pre-emphasis in register 0x590 set by */
13227 /* bootcode if bit 18 is set */
13228 if (cfg2 & (1 << 18))
13229 tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
13231 if ((tg3_flag(tp, 57765_PLUS) ||
13232 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
13233 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
13234 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
13235 tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
/* CFG_3 carries the ASPM-debounce workaround bit on older PCIe parts. */
13237 if (tg3_flag(tp, PCI_EXPRESS) &&
13238 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
13239 !tg3_flag(tp, 57765_PLUS)) {
13242 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
13243 if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
13244 tg3_flag_set(tp, ASPM_WORKAROUND);
13247 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
13248 tg3_flag_set(tp, RGMII_INBAND_DISABLE);
13249 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
13250 tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
13251 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
13252 tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
/* Finally publish the WOL capability/enable state to the PM core. */
13255 if (tg3_flag(tp, WOL_CAP))
13256 device_set_wakeup_enable(&tp->pdev->dev,
13257 tg3_flag(tp, WOL_ENABLE));
13259 device_set_wakeup_capable(&tp->pdev->dev, false);
/* Issue an OTP controller command and poll OTP_STATUS for completion.
 * Polls up to 100 iterations (comment says up to 1 ms total; the delay
 * between iterations is in an elided line).  Returns 0 on completion,
 * -EBUSY on timeout.
 */
13262 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
/* Pulse START, then latch the command itself. */
13267 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
13268 tw32(OTP_CTRL, cmd);
13270 /* Wait for up to 1 ms for command to execute. */
13271 for (i = 0; i < 100; i++) {
13272 val = tr32(OTP_STATUS);
13273 if (val & OTP_STATUS_CMD_DONE)
13278 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
13281 /* Read the gphy configuration from the OTP region of the chip. The gphy
13282 * configuration is a 32-bit value that straddles the alignment boundary.
13283 * We do two 32-bit reads and then shift and merge the results.
13285 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
13287 u32 bhalf_otp, thalf_otp;
/* Route OTP accesses through the GRC register window. */
13289 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
13291 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
/* First read: top half of the config word at MAGIC1. */
13294 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
13296 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13299 thalf_otp = tr32(OTP_READ_DATA);
/* Second read: bottom half at MAGIC2. */
13301 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
13303 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13306 bhalf_otp = tr32(OTP_READ_DATA);
/* Merge: low 16 bits of the top half become the high 16 of the result. */
13308 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
/* Initialize tp->link_config to "autonegotiate everything the PHY
 * supports": build the advertised-mode mask from the PHY capability
 * flags, then reset all current/original speed and duplex fields to
 * their INVALID sentinels with autoneg enabled.
 */
13311 static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
13313 u32 adv = ADVERTISED_Autoneg |
/* Gigabit modes unless the PHY is 10/100-only. */
13316 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
13317 adv |= ADVERTISED_1000baseT_Half |
13318 ADVERTISED_1000baseT_Full;
/* Copper modes for non-serdes PHYs; fibre otherwise (elided else). */
13320 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
13321 adv |= ADVERTISED_100baseT_Half |
13322 ADVERTISED_100baseT_Full |
13323 ADVERTISED_10baseT_Half |
13324 ADVERTISED_10baseT_Full |
13327 adv |= ADVERTISED_FIBRE;
13329 tp->link_config.advertising = adv;
13330 tp->link_config.speed = SPEED_INVALID;
13331 tp->link_config.duplex = DUPLEX_INVALID;
13332 tp->link_config.autoneg = AUTONEG_ENABLE;
13333 tp->link_config.active_speed = SPEED_INVALID;
13334 tp->link_config.active_duplex = DUPLEX_INVALID;
13335 tp->link_config.orig_speed = SPEED_INVALID;
13336 tp->link_config.orig_duplex = DUPLEX_INVALID;
13337 tp->link_config.orig_autoneg = AUTONEG_INVALID;
/* Identify and initialize the PHY.  Resolution order for the PHY ID:
 * (1) skip hardware reads when ASF/APE firmware owns the PHY,
 * (2) MII_PHYSID1/2 registers if they yield a known ID,
 * (3) the ID already set from EEPROM in tg3_get_eeprom_hw_cfg(),
 * (4) the hardcoded subsystem-ID table.  Then set EEE capability,
 * link-config defaults, and (for copper PHYs not owned by firmware)
 * reset the PHY and restart autonegotiation if needed.
 * NOTE(review): several lines (declarations, error checks) are elided.
 */
13340 static int __devinit tg3_phy_probe(struct tg3 *tp)
13342 u32 hw_phy_id_1, hw_phy_id_2;
13343 u32 hw_phy_id, hw_phy_id_masked;
13346 /* flow control autonegotiation is default behavior */
13347 tg3_flag_set(tp, PAUSE_AUTONEG);
13348 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
13350 if (tg3_flag(tp, USE_PHYLIB))
13351 return tg3_phy_init(tp);
13353 /* Reading the PHY ID register can conflict with ASF
13354 * firmware access to the PHY hardware.
13357 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
13358 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
13360 /* Now read the physical PHY_ID from the chip and verify
13361 * that it is sane. If it doesn't look good, we fall back
13362 * to either the hard-coded table based PHY_ID and failing
13363 * that the value found in the eeprom area.
13365 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
13366 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
/* Pack PHYSID1/2 into the driver's internal PHY-ID layout. */
13368 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
13369 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
13370 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
13372 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
13375 if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
13376 tp->phy_id = hw_phy_id;
13377 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
13378 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13380 tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
13382 if (tp->phy_id != TG3_PHY_ID_INVALID) {
13383 /* Do nothing, phy ID already set up in
13384 * tg3_get_eeprom_hw_cfg().
13387 struct subsys_tbl_ent *p;
13389 /* No eeprom signature? Try the hardcoded
13390 * subsys device table.
13392 p = tg3_lookup_by_subsys(tp);
13396 tp->phy_id = p->phy_id;
13398 tp->phy_id == TG3_PHY_ID_BCM8002)
13399 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
/* EEE is supported on 5719/5720, 5718 (post-A0) and 57765 (post-A0)
 * copper PHYs only.
 */
13403 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13404 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13405 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
13406 (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
13407 tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
13408 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
13409 tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
13410 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
13412 tg3_phy_init_link_config(tp);
/* Only touch the PHY when firmware (ASF/APE) does not own it. */
13414 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13415 !tg3_flag(tp, ENABLE_APE) &&
13416 !tg3_flag(tp, ENABLE_ASF)) {
/* BMSR link bit is latched; read twice to get current state. */
13419 tg3_readphy(tp, MII_BMSR, &bmsr);
13420 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
13421 (bmsr & BMSR_LSTATUS))
13422 goto skip_phy_reset;
13424 err = tg3_phy_reset(tp);
13428 tg3_phy_set_wirespeed(tp);
13430 mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
13431 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
13432 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
/* If not already advertising everything, reconfigure and restart AN. */
13433 if (!tg3_copper_is_advertising_all(tp, mask)) {
13434 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
13435 tp->link_config.flowctrl);
13437 tg3_writephy(tp, MII_BMCR,
13438 BMCR_ANENABLE | BMCR_ANRESTART);
/* BCM5401 needs its DSP initialized after probe. */
13443 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
13444 err = tg3_init_5401phy_dsp(tp);
13448 err = tg3_init_5401phy_dsp(tp);
/* Extract the board part number (and, for Dell boards, a vendor firmware
 * version string) from the PCI VPD read-only section.  If VPD is absent
 * or malformed, fall back to hardcoded part-number strings derived from
 * the PCI device ID, and finally to "none".
 * NOTE(review): bounds-check branches and a kfree of vpd_data appear in
 * elided lines of the original; comments cover visible code only.
 */
13454 static void __devinit tg3_read_vpd(struct tg3 *tp)
13457 unsigned int block_end, rosize, len;
13461 vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
/* Locate the large-resource read-only data tag. */
13465 i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
13467 goto out_not_found;
13469 rosize = pci_vpd_lrdt_size(&vpd_data[i]);
13470 block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
13471 i += PCI_VPD_LRDT_TAG_SIZE;
13473 if (block_end > vpdlen)
13474 goto out_not_found;
/* Dell boards ("1028" manufacturer ID) embed a firmware version in
 * the VENDOR0 keyword; copy it into tp->fw_ver.
 */
13476 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13477 PCI_VPD_RO_KEYWORD_MFR_ID);
13479 len = pci_vpd_info_field_size(&vpd_data[j]);
13481 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13482 if (j + len > block_end || len != 4 ||
13483 memcmp(&vpd_data[j], "1028", 4))
13486 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13487 PCI_VPD_RO_KEYWORD_VENDOR0);
13491 len = pci_vpd_info_field_size(&vpd_data[j]);
13493 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13494 if (j + len > block_end)
/* Clamp to the fw_ver buffer, leaving room for the NUL terminator. */
13497 if (len >= sizeof(tp->fw_ver))
13498 len = sizeof(tp->fw_ver) - 1;
13499 memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
13500 snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
/* Board part number from the PN keyword. */
13505 i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13506 PCI_VPD_RO_KEYWORD_PARTNO);
13508 goto out_not_found;
13510 len = pci_vpd_info_field_size(&vpd_data[i]);
13512 i += PCI_VPD_INFO_FLD_HDR_SIZE;
13513 if (len > TG3_BPN_SIZE ||
13514 (len + i) > vpdlen)
13515 goto out_not_found;
13517 memcpy(tp->board_part_number, &vpd_data[i], len);
13521 if (tp->board_part_number[0])
/* VPD lookup failed: synthesize a part number from the device ID. */
13525 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
13526 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
13527 strcpy(tp->board_part_number, "BCM5717");
13528 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
13529 strcpy(tp->board_part_number, "BCM5718");
13532 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
13533 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
13534 strcpy(tp->board_part_number, "BCM57780");
13535 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
13536 strcpy(tp->board_part_number, "BCM57760");
13537 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
13538 strcpy(tp->board_part_number, "BCM57790");
13539 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
13540 strcpy(tp->board_part_number, "BCM57788");
13543 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
13544 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
13545 strcpy(tp->board_part_number, "BCM57761");
13546 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
13547 strcpy(tp->board_part_number, "BCM57765");
13548 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
13549 strcpy(tp->board_part_number, "BCM57781");
13550 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
13551 strcpy(tp->board_part_number, "BCM57785");
13552 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
13553 strcpy(tp->board_part_number, "BCM57791");
13554 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13555 strcpy(tp->board_part_number, "BCM57795");
13558 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13559 strcpy(tp->board_part_number, "BCM95906");
13562 strcpy(tp->board_part_number, "none");
/* Validate a firmware image header in NVRAM at @offset: the first word
 * must carry the 0x0c000000 signature in its top bits, and the second
 * word must also be readable (further checks are in elided lines).
 */
13566 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
13570 if (tg3_nvram_read(tp, offset, &val) ||
13571 (val & 0xfc000000) != 0x0c000000 ||
13572 tg3_nvram_read(tp, offset + 4, &val) ||
/* Append the bootcode version to tp->fw_ver.  Newer images embed a
 * 16-byte version string (copied verbatim from NVRAM); older images
 * store a packed major/minor word at TG3_NVM_PTREV_BCVER, formatted as
 * "vM.mm".
 */
13579 static void __devinit tg3_read_bc_ver(struct tg3 *tp)
13581 u32 val, offset, start, ver_offset;
13583 bool newver = false;
/* NVRAM directory: image pointer at 0xc, load address at 0x4. */
13585 if (tg3_nvram_read(tp, 0xc, &offset) ||
13586 tg3_nvram_read(tp, 0x4, &start))
13589 offset = tg3_nvram_logical_addr(tp, offset);
13591 if (tg3_nvram_read(tp, offset, &val))
/* 0x0c000000 signature marks the new-style image header. */
13594 if ((val & 0xfc000000) == 0x0c000000) {
13595 if (tg3_nvram_read(tp, offset + 4, &val))
13602 dst_off = strlen(tp->fw_ver);
/* New style: copy the 16-byte embedded version string. */
13605 if (TG3_VER_SIZE - dst_off < 16 ||
13606 tg3_nvram_read(tp, offset + 8, &ver_offset))
13609 offset = offset + ver_offset - start;
13610 for (i = 0; i < 16; i += 4) {
13612 if (tg3_nvram_read_be32(tp, offset + i, &v))
13615 memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
/* Old style: decode the packed major/minor version word. */
13620 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
13623 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
13624 TG3_NVM_BCVER_MAJSFT;
13625 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
13626 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
13627 "v%d.%02d", major, minor);
/* Format the hardware self-boot version ("sb vM.mm") into tp->fw_ver
 * from the packed major/minor fields of the HWSB CFG1 NVRAM word.
 */
13631 static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
13633 u32 val, major, minor;
13635 /* Use native endian representation */
13636 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
13639 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
13640 TG3_NVM_HWSB_CFG1_MAJSFT;
13641 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
13642 TG3_NVM_HWSB_CFG1_MINSFT;
13644 snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
/* Append the self-boot (EEPROM format-1) firmware version to
 * tp->fw_ver as "sb vM.mm" plus an optional build letter ('a'-'z').
 * The edh (version) word offset depends on the format revision.
 */
13647 static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
13649 u32 offset, major, minor, build;
13651 strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
13653 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
/* Pick the version-word offset for this format-1 revision. */
13656 switch (val & TG3_EEPROM_SB_REVISION_MASK) {
13657 case TG3_EEPROM_SB_REVISION_0:
13658 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
13660 case TG3_EEPROM_SB_REVISION_2:
13661 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
13663 case TG3_EEPROM_SB_REVISION_3:
13664 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
13666 case TG3_EEPROM_SB_REVISION_4:
13667 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
13669 case TG3_EEPROM_SB_REVISION_5:
13670 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
13672 case TG3_EEPROM_SB_REVISION_6:
13673 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
13679 if (tg3_nvram_read(tp, offset, &val))
13682 build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
13683 TG3_EEPROM_SB_EDH_BLD_SHFT;
13684 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
13685 TG3_EEPROM_SB_EDH_MAJ_SHFT;
13686 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
/* Sanity limits: two-digit minor, build letter within 'a'..'z'. */
13688 if (minor > 99 || build > 26)
13691 offset = strlen(tp->fw_ver);
13692 snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
13693 " v%d.%02d", major, minor);
/* Non-zero build number becomes a trailing letter: 1 -> 'a', etc. */
13696 offset = strlen(tp->fw_ver);
13697 if (offset < TG3_VER_SIZE - 1)
13698 tp->fw_ver[offset] = 'a' + build - 1;
/* Locate the ASF management-firmware image via the NVRAM directory and
 * append its version string to tp->fw_ver (prefixed with ", ").  The
 * version is read 4 bytes at a time, truncated to fit TG3_VER_SIZE.
 */
13702 static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
13704 u32 val, offset, start;
/* Scan the directory for an ASF-init entry. */
13707 for (offset = TG3_NVM_DIR_START;
13708 offset < TG3_NVM_DIR_END;
13709 offset += TG3_NVM_DIRENT_SIZE) {
13710 if (tg3_nvram_read(tp, offset, &val))
13713 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
13717 if (offset == TG3_NVM_DIR_END)
/* Image load address: fixed on pre-5705 parts, otherwise in NVRAM. */
13720 if (!tg3_flag(tp, 5705_PLUS))
13721 start = 0x08000000;
13722 else if (tg3_nvram_read(tp, offset - 4, &start))
13725 if (tg3_nvram_read(tp, offset + 4, &offset) ||
13726 !tg3_fw_img_is_valid(tp, offset) ||
13727 tg3_nvram_read(tp, offset + 8, &val))
13730 offset += val - start;
13732 vlen = strlen(tp->fw_ver);
13734 tp->fw_ver[vlen++] = ',';
13735 tp->fw_ver[vlen++] = ' ';
13737 for (i = 0; i < 4; i++) {
13739 if (tg3_nvram_read_be32(tp, offset, &v))
13742 offset += sizeof(v);
/* Partial final copy when the buffer is nearly full. */
13744 if (vlen > TG3_VER_SIZE - sizeof(v)) {
13745 memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
13749 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
/* Append the APE (DASH/NCSI) firmware version to tp->fw_ver as
 * " <type> vM.m.r.b".  Only runs when both APE and ASF are enabled and
 * the APE firmware reports a valid signature and READY status.
 */
13754 static void __devinit tg3_read_dash_ver(struct tg3 *tp)
13760 if (!tg3_flag(tp, ENABLE_APE) || !tg3_flag(tp, ENABLE_ASF))
13763 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
13764 if (apedata != APE_SEG_SIG_MAGIC)
13767 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
13768 if (!(apedata & APE_FW_STATUS_READY))
13771 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
/* Record whether the firmware is NCSI-flavored (affects name printed). */
13773 if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) {
13774 tg3_flag_set(tp, APE_HAS_NCSI);
13780 vlen = strlen(tp->fw_ver);
13782 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
13784 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
13785 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
13786 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
13787 (apedata & APE_FW_VERSION_BLDMSK))
/* Build the complete tp->fw_ver string: dispatch on the NVRAM magic to
 * the bootcode / self-boot / hardware-self-boot version reader, then
 * append the management (ASF) or DASH (APE) firmware version.  Skips
 * work if a version was already filled in (e.g. by tg3_read_vpd).
 */
13790 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
13793 bool vpd_vers = false;
13795 if (tp->fw_ver[0] != 0)
/* No NVRAM at all: self-boot only. */
13798 if (tg3_flag(tp, NO_NVRAM)) {
13799 strcat(tp->fw_ver, "sb");
13803 if (tg3_nvram_read(tp, 0, &val))
/* Dispatch on the NVRAM magic value. */
13806 if (val == TG3_EEPROM_MAGIC)
13807 tg3_read_bc_ver(tp);
13808 else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
13809 tg3_read_sb_ver(tp, val);
13810 else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
13811 tg3_read_hwsb_ver(tp);
13818 if (tg3_flag(tp, ENABLE_APE)) {
13819 if (tg3_flag(tp, ENABLE_ASF))
13820 tg3_read_dash_ver(tp);
13821 } else if (tg3_flag(tp, ENABLE_ASF)) {
13822 tg3_read_mgmtfw_ver(tp);
/* Guarantee NUL termination regardless of the paths taken above. */
13826 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
13829 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
/* Pick the RX return ring size for this chip: the large-ring size on
 * 5717-class parts, the 5700 size on jumbo-capable non-5780 parts, and
 * the 5705 size otherwise.
 */
13831 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
13833 if (tg3_flag(tp, LRG_PROD_RING_CAP))
13834 return TG3_RX_RET_MAX_SIZE_5717;
13835 else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
13836 return TG3_RX_RET_MAX_SIZE_5700;
13838 return TG3_RX_RET_MAX_SIZE_5705;
/* Host bridges known to reorder PCI writes; presence of one of these
 * triggers the driver's write-reorder workaround (checked elsewhere).
 */
13841 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
13842 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
13843 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
13844 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
13848 static int __devinit tg3_get_invariants(struct tg3 *tp)
13851 u32 pci_state_reg, grc_misc_cfg;
13856 /* Force memory write invalidate off. If we leave it on,
13857 * then on 5700_BX chips we have to enable a workaround.
13858 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
13859 * to match the cacheline size. The Broadcom driver have this
13860 * workaround but turns MWI off all the times so never uses
13861 * it. This seems to suggest that the workaround is insufficient.
13863 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13864 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
13865 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13867 /* Important! -- Make sure register accesses are byteswapped
13868 * correctly. Also, for those chips that require it, make
13869 * sure that indirect register accesses are enabled before
13870 * the first operation.
13872 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13874 tp->misc_host_ctrl |= (misc_ctrl_reg &
13875 MISC_HOST_CTRL_CHIPREV);
13876 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13877 tp->misc_host_ctrl);
13879 tp->pci_chip_rev_id = (misc_ctrl_reg >>
13880 MISC_HOST_CTRL_CHIPREV_SHIFT);
13881 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
13882 u32 prod_id_asic_rev;
13884 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
13885 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
13886 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
13887 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
13888 pci_read_config_dword(tp->pdev,
13889 TG3PCI_GEN2_PRODID_ASICREV,
13890 &prod_id_asic_rev);
13891 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
13892 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
13893 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
13894 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
13895 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
13896 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13897 pci_read_config_dword(tp->pdev,
13898 TG3PCI_GEN15_PRODID_ASICREV,
13899 &prod_id_asic_rev);
13901 pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
13902 &prod_id_asic_rev);
13904 tp->pci_chip_rev_id = prod_id_asic_rev;
13907 /* Wrong chip ID in 5752 A0. This code can be removed later
13908 * as A0 is not in production.
13910 if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
13911 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
13913 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
13914 * we need to disable memory and use config. cycles
13915 * only to access all registers. The 5702/03 chips
13916 * can mistakenly decode the special cycles from the
13917 * ICH chipsets as memory write cycles, causing corruption
13918 * of register and memory space. Only certain ICH bridges
13919 * will drive special cycles with non-zero data during the
13920 * address phase which can fall within the 5703's address
13921 * range. This is not an ICH bug as the PCI spec allows
13922 * non-zero address during special cycles. However, only
13923 * these ICH bridges are known to drive non-zero addresses
13924 * during special cycles.
13926 * Since special cycles do not cross PCI bridges, we only
13927 * enable this workaround if the 5703 is on the secondary
13928 * bus of these ICH bridges.
13930 if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
13931 (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
13932 static struct tg3_dev_id {
13936 } ich_chipsets[] = {
13937 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
13939 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
13941 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
13943 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
13947 struct tg3_dev_id *pci_id = &ich_chipsets[0];
13948 struct pci_dev *bridge = NULL;
13950 while (pci_id->vendor != 0) {
13951 bridge = pci_get_device(pci_id->vendor, pci_id->device,
13957 if (pci_id->rev != PCI_ANY_ID) {
13958 if (bridge->revision > pci_id->rev)
13961 if (bridge->subordinate &&
13962 (bridge->subordinate->number ==
13963 tp->pdev->bus->number)) {
13964 tg3_flag_set(tp, ICH_WORKAROUND);
13965 pci_dev_put(bridge);
13971 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
13972 static struct tg3_dev_id {
13975 } bridge_chipsets[] = {
13976 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
13977 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
13980 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
13981 struct pci_dev *bridge = NULL;
13983 while (pci_id->vendor != 0) {
13984 bridge = pci_get_device(pci_id->vendor,
13991 if (bridge->subordinate &&
13992 (bridge->subordinate->number <=
13993 tp->pdev->bus->number) &&
13994 (bridge->subordinate->subordinate >=
13995 tp->pdev->bus->number)) {
13996 tg3_flag_set(tp, 5701_DMA_BUG);
13997 pci_dev_put(bridge);
14003 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
14004 * DMA addresses > 40-bit. This bridge may have other additional
14005 * 57xx devices behind it in some 4-port NIC designs for example.
14006 * Any tg3 device found behind the bridge will also need the 40-bit
14009 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
14010 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
14011 tg3_flag_set(tp, 5780_CLASS);
14012 tg3_flag_set(tp, 40BIT_DMA_BUG);
14013 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
14015 struct pci_dev *bridge = NULL;
14018 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
14019 PCI_DEVICE_ID_SERVERWORKS_EPB,
14021 if (bridge && bridge->subordinate &&
14022 (bridge->subordinate->number <=
14023 tp->pdev->bus->number) &&
14024 (bridge->subordinate->subordinate >=
14025 tp->pdev->bus->number)) {
14026 tg3_flag_set(tp, 40BIT_DMA_BUG);
14027 pci_dev_put(bridge);
14033 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14034 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
14035 tp->pdev_peer = tg3_find_peer(tp);
14037 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14038 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14039 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14040 tg3_flag_set(tp, 5717_PLUS);
14042 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
14043 tg3_flag(tp, 5717_PLUS))
14044 tg3_flag_set(tp, 57765_PLUS);
14046 /* Intentionally exclude ASIC_REV_5906 */
14047 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14048 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
14049 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14050 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14051 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14052 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14053 tg3_flag(tp, 57765_PLUS))
14054 tg3_flag_set(tp, 5755_PLUS);
14056 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14057 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14058 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
14059 tg3_flag(tp, 5755_PLUS) ||
14060 tg3_flag(tp, 5780_CLASS))
14061 tg3_flag_set(tp, 5750_PLUS);
14063 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
14064 tg3_flag(tp, 5750_PLUS))
14065 tg3_flag_set(tp, 5705_PLUS);
14067 /* Determine TSO capabilities */
14068 if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0)
14069 ; /* Do nothing. HW bug. */
14070 else if (tg3_flag(tp, 57765_PLUS))
14071 tg3_flag_set(tp, HW_TSO_3);
14072 else if (tg3_flag(tp, 5755_PLUS) ||
14073 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14074 tg3_flag_set(tp, HW_TSO_2);
14075 else if (tg3_flag(tp, 5750_PLUS)) {
14076 tg3_flag_set(tp, HW_TSO_1);
14077 tg3_flag_set(tp, TSO_BUG);
14078 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
14079 tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
14080 tg3_flag_clear(tp, TSO_BUG);
14081 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14082 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14083 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
14084 tg3_flag_set(tp, TSO_BUG);
14085 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
14086 tp->fw_needed = FIRMWARE_TG3TSO5;
14088 tp->fw_needed = FIRMWARE_TG3TSO;
14091 /* Selectively allow TSO based on operating conditions */
14092 if (tg3_flag(tp, HW_TSO_1) ||
14093 tg3_flag(tp, HW_TSO_2) ||
14094 tg3_flag(tp, HW_TSO_3) ||
14096 /* For firmware TSO, assume ASF is disabled.
14097 * We'll disable TSO later if we discover ASF
14098 * is enabled in tg3_get_eeprom_hw_cfg().
14100 tg3_flag_set(tp, TSO_CAPABLE);
14102 tg3_flag_clear(tp, TSO_CAPABLE);
14103 tg3_flag_clear(tp, TSO_BUG);
14104 tp->fw_needed = NULL;
14107 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
14108 tp->fw_needed = FIRMWARE_TG3;
14112 if (tg3_flag(tp, 5750_PLUS)) {
14113 tg3_flag_set(tp, SUPPORT_MSI);
14114 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
14115 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
14116 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
14117 tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
14118 tp->pdev_peer == tp->pdev))
14119 tg3_flag_clear(tp, SUPPORT_MSI);
14121 if (tg3_flag(tp, 5755_PLUS) ||
14122 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14123 tg3_flag_set(tp, 1SHOT_MSI);
14126 if (tg3_flag(tp, 57765_PLUS)) {
14127 tg3_flag_set(tp, SUPPORT_MSIX);
14128 tp->irq_max = TG3_IRQ_MAX_VECS;
14132 if (tg3_flag(tp, 5755_PLUS) ||
14133 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14134 tg3_flag_set(tp, SHORT_DMA_BUG);
14136 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
14137 tg3_flag_set(tp, 4K_FIFO_LIMIT);
14139 if (tg3_flag(tp, 5717_PLUS))
14140 tg3_flag_set(tp, LRG_PROD_RING_CAP);
14142 if (tg3_flag(tp, 57765_PLUS) &&
14143 tp->pci_chip_rev_id != CHIPREV_ID_5719_A0)
14144 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
14146 if (!tg3_flag(tp, 5705_PLUS) ||
14147 tg3_flag(tp, 5780_CLASS) ||
14148 tg3_flag(tp, USE_JUMBO_BDFLAG))
14149 tg3_flag_set(tp, JUMBO_CAPABLE);
14151 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14154 if (pci_is_pcie(tp->pdev)) {
14157 tg3_flag_set(tp, PCI_EXPRESS);
14159 tp->pcie_readrq = 4096;
14160 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14161 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14162 tp->pcie_readrq = 2048;
14164 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
14166 pci_read_config_word(tp->pdev,
14167 pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
14169 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
14170 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
14172 tg3_flag_clear(tp, HW_TSO_2);
14173 tg3_flag_clear(tp, TSO_CAPABLE);
14175 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14176 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14177 tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
14178 tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
14179 tg3_flag_set(tp, CLKREQ_BUG);
14180 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
14181 tg3_flag_set(tp, L1PLLPD_EN);
14183 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
14184 /* BCM5785 devices are effectively PCIe devices, and should
14185 * follow PCIe codepaths, but do not have a PCIe capabilities
14188 tg3_flag_set(tp, PCI_EXPRESS);
14189 } else if (!tg3_flag(tp, 5705_PLUS) ||
14190 tg3_flag(tp, 5780_CLASS)) {
14191 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
14192 if (!tp->pcix_cap) {
14193 dev_err(&tp->pdev->dev,
14194 "Cannot find PCI-X capability, aborting\n");
14198 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
14199 tg3_flag_set(tp, PCIX_MODE);
14202 /* If we have an AMD 762 or VIA K8T800 chipset, write
14203 * reordering to the mailbox registers done by the host
14204 * controller can cause major troubles. We read back from
14205 * every mailbox register write to force the writes to be
14206 * posted to the chip in order.
14208 if (pci_dev_present(tg3_write_reorder_chipsets) &&
14209 !tg3_flag(tp, PCI_EXPRESS))
14210 tg3_flag_set(tp, MBOX_WRITE_REORDER);
14212 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
14213 &tp->pci_cacheline_sz);
14214 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14215 &tp->pci_lat_timer);
14216 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14217 tp->pci_lat_timer < 64) {
14218 tp->pci_lat_timer = 64;
14219 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14220 tp->pci_lat_timer);
14223 /* Important! -- It is critical that the PCI-X hw workaround
14224 * situation is decided before the first MMIO register access.
14226 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
14227 /* 5700 BX chips need to have their TX producer index
14228 * mailboxes written twice to workaround a bug.
14230 tg3_flag_set(tp, TXD_MBOX_HWBUG);
14232 /* If we are in PCI-X mode, enable register write workaround.
14234 * The workaround is to use indirect register accesses
14235 * for all chip writes not to mailbox registers.
14237 if (tg3_flag(tp, PCIX_MODE)) {
14240 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14242 /* The chip can have it's power management PCI config
14243 * space registers clobbered due to this bug.
14244 * So explicitly force the chip into D0 here.
14246 pci_read_config_dword(tp->pdev,
14247 tp->pm_cap + PCI_PM_CTRL,
14249 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
14250 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
14251 pci_write_config_dword(tp->pdev,
14252 tp->pm_cap + PCI_PM_CTRL,
14255 /* Also, force SERR#/PERR# in PCI command. */
14256 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14257 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
14258 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14262 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
14263 tg3_flag_set(tp, PCI_HIGH_SPEED);
14264 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
14265 tg3_flag_set(tp, PCI_32BIT);
14267 /* Chip-specific fixup from Broadcom driver */
14268 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
14269 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
14270 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
14271 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
14274 /* Default fast path register access methods */
14275 tp->read32 = tg3_read32;
14276 tp->write32 = tg3_write32;
14277 tp->read32_mbox = tg3_read32;
14278 tp->write32_mbox = tg3_write32;
14279 tp->write32_tx_mbox = tg3_write32;
14280 tp->write32_rx_mbox = tg3_write32;
14282 /* Various workaround register access methods */
14283 if (tg3_flag(tp, PCIX_TARGET_HWBUG))
14284 tp->write32 = tg3_write_indirect_reg32;
14285 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
14286 (tg3_flag(tp, PCI_EXPRESS) &&
14287 tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
14289 * Back to back register writes can cause problems on these
14290 * chips, the workaround is to read back all reg writes
14291 * except those to mailbox regs.
14293 * See tg3_write_indirect_reg32().
14295 tp->write32 = tg3_write_flush_reg32;
14298 if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
14299 tp->write32_tx_mbox = tg3_write32_tx_mbox;
14300 if (tg3_flag(tp, MBOX_WRITE_REORDER))
14301 tp->write32_rx_mbox = tg3_write_flush_reg32;
14304 if (tg3_flag(tp, ICH_WORKAROUND)) {
14305 tp->read32 = tg3_read_indirect_reg32;
14306 tp->write32 = tg3_write_indirect_reg32;
14307 tp->read32_mbox = tg3_read_indirect_mbox;
14308 tp->write32_mbox = tg3_write_indirect_mbox;
14309 tp->write32_tx_mbox = tg3_write_indirect_mbox;
14310 tp->write32_rx_mbox = tg3_write_indirect_mbox;
14315 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14316 pci_cmd &= ~PCI_COMMAND_MEMORY;
14317 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14319 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14320 tp->read32_mbox = tg3_read32_mbox_5906;
14321 tp->write32_mbox = tg3_write32_mbox_5906;
14322 tp->write32_tx_mbox = tg3_write32_mbox_5906;
14323 tp->write32_rx_mbox = tg3_write32_mbox_5906;
14326 if (tp->write32 == tg3_write_indirect_reg32 ||
14327 (tg3_flag(tp, PCIX_MODE) &&
14328 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14329 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
14330 tg3_flag_set(tp, SRAM_USE_CONFIG);
14332 /* The memory arbiter has to be enabled in order for SRAM accesses
14333 * to succeed. Normally on powerup the tg3 chip firmware will make
14334 * sure it is enabled, but other entities such as system netboot
14335 * code might disable it.
14337 val = tr32(MEMARB_MODE);
14338 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
14340 tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
14341 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14342 tg3_flag(tp, 5780_CLASS)) {
14343 if (tg3_flag(tp, PCIX_MODE)) {
14344 pci_read_config_dword(tp->pdev,
14345 tp->pcix_cap + PCI_X_STATUS,
14347 tp->pci_fn = val & 0x7;
14349 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
14350 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
14351 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
14352 NIC_SRAM_CPMUSTAT_SIG) {
14353 tp->pci_fn = val & TG3_CPMU_STATUS_FMSK_5717;
14354 tp->pci_fn = tp->pci_fn ? 1 : 0;
14356 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14357 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
14358 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
14359 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
14360 NIC_SRAM_CPMUSTAT_SIG) {
14361 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
14362 TG3_CPMU_STATUS_FSHFT_5719;
14366 /* Get eeprom hw config before calling tg3_set_power_state().
14367 * In particular, the TG3_FLAG_IS_NIC flag must be
14368 * determined before calling tg3_set_power_state() so that
14369 * we know whether or not to switch out of Vaux power.
14370 * When the flag is set, it means that GPIO1 is used for eeprom
14371 * write protect and also implies that it is a LOM where GPIOs
14372 * are not used to switch power.
14374 tg3_get_eeprom_hw_cfg(tp);
14376 if (tp->fw_needed && tg3_flag(tp, ENABLE_ASF)) {
14377 tg3_flag_clear(tp, TSO_CAPABLE);
14378 tg3_flag_clear(tp, TSO_BUG);
14379 tp->fw_needed = NULL;
14382 if (tg3_flag(tp, ENABLE_APE)) {
14383 /* Allow reads and writes to the
14384 * APE register and memory space.
14386 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
14387 PCISTATE_ALLOW_APE_SHMEM_WR |
14388 PCISTATE_ALLOW_APE_PSPACE_WR;
14389 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
14392 tg3_ape_lock_init(tp);
14395 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14396 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14397 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14398 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14399 tg3_flag(tp, 57765_PLUS))
14400 tg3_flag_set(tp, CPMU_PRESENT);
14402 /* Set up tp->grc_local_ctrl before calling
14403 * tg3_pwrsrc_switch_to_vmain(). GPIO1 driven high
14404 * will bring 5700's external PHY out of reset.
14405 * It is also used as eeprom write protect on LOMs.
14407 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
14408 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14409 tg3_flag(tp, EEPROM_WRITE_PROT))
14410 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
14411 GRC_LCLCTRL_GPIO_OUTPUT1);
14412 /* Unused GPIO3 must be driven as output on 5752 because there
14413 * are no pull-up resistors on unused GPIO pins.
14415 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
14416 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
14418 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14419 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14420 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
14421 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14423 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
14424 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
14425 /* Turn off the debug UART. */
14426 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14427 if (tg3_flag(tp, IS_NIC))
14428 /* Keep VMain power. */
14429 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
14430 GRC_LCLCTRL_GPIO_OUTPUT0;
14433 /* Switch out of Vaux if it is a NIC */
14434 tg3_pwrsrc_switch_to_vmain(tp);
14436 /* Derive initial jumbo mode from MTU assigned in
14437 * ether_setup() via the alloc_etherdev() call
14439 if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
14440 tg3_flag_set(tp, JUMBO_RING_ENABLE);
14442 /* Determine WakeOnLan speed to use. */
14443 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14444 tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
14445 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
14446 tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
14447 tg3_flag_clear(tp, WOL_SPEED_100MB);
14449 tg3_flag_set(tp, WOL_SPEED_100MB);
14452 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14453 tp->phy_flags |= TG3_PHYFLG_IS_FET;
14455 /* A few boards don't want Ethernet@WireSpeed phy feature */
14456 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14457 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14458 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
14459 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
14460 (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
14461 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
14462 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
14464 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
14465 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
14466 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
14467 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
14468 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
14470 if (tg3_flag(tp, 5705_PLUS) &&
14471 !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
14472 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
14473 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
14474 !tg3_flag(tp, 57765_PLUS)) {
14475 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14476 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
14477 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14478 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
14479 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
14480 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
14481 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
14482 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
14483 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
14485 tp->phy_flags |= TG3_PHYFLG_BER_BUG;
14488 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
14489 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
14490 tp->phy_otp = tg3_read_otp_phycfg(tp);
14491 if (tp->phy_otp == 0)
14492 tp->phy_otp = TG3_OTP_DEFAULT;
14495 if (tg3_flag(tp, CPMU_PRESENT))
14496 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
14498 tp->mi_mode = MAC_MI_MODE_BASE;
14500 tp->coalesce_mode = 0;
14501 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
14502 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
14503 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
14505 /* Set these bits to enable statistics workaround. */
14506 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14507 tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
14508 tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
14509 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
14510 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
14513 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14514 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
14515 tg3_flag_set(tp, USE_PHYLIB);
14517 err = tg3_mdio_init(tp);
14521 /* Initialize data/descriptor byte/word swapping. */
14522 val = tr32(GRC_MODE);
14523 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14524 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
14525 GRC_MODE_WORD_SWAP_B2HRX_DATA |
14526 GRC_MODE_B2HRX_ENABLE |
14527 GRC_MODE_HTX2B_ENABLE |
14528 GRC_MODE_HOST_STACKUP);
14530 val &= GRC_MODE_HOST_STACKUP;
14532 tw32(GRC_MODE, val | tp->grc_mode);
14534 tg3_switch_clocks(tp);
14536 /* Clear this out for sanity. */
14537 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
14539 /* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. */
14540 tw32(TG3PCI_REG_BASE_ADDR, 0);
14542 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14544 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
14545 !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
14546 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
14548 if (chiprevid == CHIPREV_ID_5701_A0 ||
14549 chiprevid == CHIPREV_ID_5701_B0 ||
14550 chiprevid == CHIPREV_ID_5701_B2 ||
14551 chiprevid == CHIPREV_ID_5701_B5) {
14552 void __iomem *sram_base;
14554 /* Write some dummy words into the SRAM status block
14555 * area, see if it reads back correctly. If the return
14556 * value is bad, force enable the PCIX workaround.
14558 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
14560 writel(0x00000000, sram_base);
14561 writel(0x00000000, sram_base + 4);
14562 writel(0xffffffff, sram_base + 4);
14563 if (readl(sram_base) != 0x00000000)
14564 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14569 tg3_nvram_init(tp);
14571 grc_misc_cfg = tr32(GRC_MISC_CFG);
14572 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
14574 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14575 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
14576 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
14577 tg3_flag_set(tp, IS_5788);
14579 if (!tg3_flag(tp, IS_5788) &&
14580 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
14581 tg3_flag_set(tp, TAGGED_STATUS);
14582 if (tg3_flag(tp, TAGGED_STATUS)) {
14583 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
14584 HOSTCC_MODE_CLRTICK_TXBD);
14586 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
14587 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14588 tp->misc_host_ctrl);
14591 /* Preserve the APE MAC_MODE bits */
14592 if (tg3_flag(tp, ENABLE_APE))
14593 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
14597 /* these are limited to 10/100 only */
14598 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14599 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
14600 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14601 tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14602 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
14603 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
14604 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
14605 (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14606 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
14607 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
14608 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
14609 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
14610 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
14611 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
14612 (tp->phy_flags & TG3_PHYFLG_IS_FET))
14613 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
14615 err = tg3_phy_probe(tp);
14617 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
14618 /* ... but do not return immediately ... */
14623 tg3_read_fw_ver(tp);
14625 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
14626 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14628 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14629 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14631 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14634 /* 5700 {AX,BX} chips have a broken status block link
14635 * change bit implementation, so we must use the
14636 * status register in those cases.
14638 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14639 tg3_flag_set(tp, USE_LINKCHG_REG);
14641 tg3_flag_clear(tp, USE_LINKCHG_REG);
14643 /* The led_ctrl is set during tg3_phy_probe, here we might
14644 * have to force the link status polling mechanism based
14645 * upon subsystem IDs.
14647 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
14648 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14649 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
14650 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14651 tg3_flag_set(tp, USE_LINKCHG_REG);
14654 /* For all SERDES we poll the MAC status register. */
14655 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14656 tg3_flag_set(tp, POLL_SERDES);
14658 tg3_flag_clear(tp, POLL_SERDES);
14660 tp->rx_offset = NET_IP_ALIGN;
14661 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
14662 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14663 tg3_flag(tp, PCIX_MODE)) {
14665 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
14666 tp->rx_copy_thresh = ~(u16)0;
14670 tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
14671 tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
14672 tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
14674 tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
14676 /* Increment the rx prod index on the rx std ring by at most
14677 * 8 for these chips to workaround hw errata.
14679 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14680 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14681 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
14682 tp->rx_std_max_post = 8;
14684 if (tg3_flag(tp, ASPM_WORKAROUND))
14685 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
14686 PCIE_PWR_MGMT_L1_THRESH_MSK;
14691 #ifdef CONFIG_SPARC
/*
 * tg3_get_macaddr_sparc() - obtain the MAC address from the OpenFirmware
 * device tree on SPARC systems.
 *
 * Looks up the "local-mac-address" property of this PCI device's OF node
 * and, when a well-formed 6-byte address is found, copies it into both
 * the active (dev_addr) and permanent (perm_addr) addresses of the
 * net_device.
 *
 * NOTE(review): this listing elides the function's tail (the success and
 * failure return paths); presumably it returns 0 on success and nonzero
 * when no usable property exists -- confirm against the full source.
 */
14692 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
14694 struct net_device *dev = tp->dev;
14695 struct pci_dev *pdev = tp->pdev;
/* Map the PCI device to its OpenFirmware device-tree node. */
14696 struct device_node *dp = pci_device_to_OF_node(pdev);
14697 const unsigned char *addr;
/* Ask firmware for the board's factory-assigned MAC address. */
14700 addr = of_get_property(dp, "local-mac-address", &len);
/* Accept only a property that is exactly one Ethernet address long. */
14701 if (addr && len == 6) {
14702 memcpy(dev->dev_addr, addr, 6);
14703 memcpy(dev->perm_addr, dev->dev_addr, 6);
/*
 * tg3_get_default_macaddr_sparc() - last-resort MAC address source on
 * SPARC: use the machine-wide Ethernet address stored in the system
 * IDPROM for both the active and permanent device addresses.
 *
 * NOTE(review): the return statement is elided from this listing;
 * presumably it returns 0 unconditionally -- confirm against full source.
 */
14709 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
14711 struct net_device *dev = tp->dev;
/* idprom->id_ethaddr is the host's IDPROM-resident Ethernet address. */
14713 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
14714 memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
/*
 * tg3_get_device_address() - determine the device's MAC address, trying
 * sources in decreasing order of preference:
 *   1. SPARC only: the OpenFirmware device tree (tg3_get_macaddr_sparc).
 *   2. The SRAM MAC address mailbox (validated by a 0x484b signature).
 *   3. NVRAM at a chip-dependent mac_offset.
 *   4. The live MAC_ADDR_0_HIGH/LOW MAC control registers.
 *   5. SPARC only: the system IDPROM as a final fallback.
 * The chosen address is copied to dev->perm_addr at the end.
 *
 * NOTE(review): this listing elides several interior lines (mac_offset
 * initializers, some else/return paths, addr_ok guards); comments below
 * cover only the visible logic.
 */
14719 static int __devinit tg3_get_device_address(struct tg3 *tp)
14721 struct net_device *dev = tp->dev;
14722 u32 hi, lo, mac_offset;
14725 #ifdef CONFIG_SPARC
/* If firmware supplied an address, we are done (early return elided). */
14726 if (!tg3_get_macaddr_sparc(tp))
/* Chip-specific selection of the NVRAM offset holding the MAC address.
 * Dual-MAC (5704/5780-class) parts pick per-port offsets and reset
 * the NVRAM state machine under the NVRAM lock.
 */
14731 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14732 tg3_flag(tp, 5780_CLASS)) {
14733 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
14735 if (tg3_nvram_lock(tp))
14736 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
14738 tg3_nvram_unlock(tp);
/* 5717+ parts store per-function addresses; odd/extra functions use
 * offsets bumped by chip-defined strides (base values elided here).
 */
14739 } else if (tg3_flag(tp, 5717_PLUS)) {
14740 if (tp->pci_fn & 1)
14742 if (tp->pci_fn > 1)
14743 mac_offset += 0x18c;
14744 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14747 /* First try to get it from MAC address mailbox. */
14748 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
/* 0x484b ("HK") in the upper half marks a valid mailbox entry. */
14749 if ((hi >> 16) == 0x484b) {
14750 dev->dev_addr[0] = (hi >> 8) & 0xff;
14751 dev->dev_addr[1] = (hi >> 0) & 0xff;
14753 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
14754 dev->dev_addr[2] = (lo >> 24) & 0xff;
14755 dev->dev_addr[3] = (lo >> 16) & 0xff;
14756 dev->dev_addr[4] = (lo >> 8) & 0xff;
14757 dev->dev_addr[5] = (lo >> 0) & 0xff;
14759 /* Some old bootcode may report a 0 MAC address in SRAM */
14760 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
14763 /* Next, try NVRAM. */
14764 if (!tg3_flag(tp, NO_NVRAM) &&
14765 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
14766 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
/* NVRAM stores the address big-endian: last 2 bytes of hi, all of lo. */
14767 memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
14768 memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
14770 /* Finally just fetch it out of the MAC control regs. */
14772 hi = tr32(MAC_ADDR_0_HIGH);
14773 lo = tr32(MAC_ADDR_0_LOW);
/* Registers hold the address in reverse byte order vs. dev_addr[]. */
14775 dev->dev_addr[5] = lo & 0xff;
14776 dev->dev_addr[4] = (lo >> 8) & 0xff;
14777 dev->dev_addr[3] = (lo >> 16) & 0xff;
14778 dev->dev_addr[2] = (lo >> 24) & 0xff;
14779 dev->dev_addr[1] = hi & 0xff;
14780 dev->dev_addr[0] = (hi >> 8) & 0xff;
/* Nothing valid found anywhere: fall back to the IDPROM on SPARC
 * (error-return path for other architectures elided from this listing).
 */
14784 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
14785 #ifdef CONFIG_SPARC
14786 if (!tg3_get_default_macaddr_sparc(tp))
/* Record the discovered address as the device's permanent address. */
14791 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
14795 #define BOUNDARY_SINGLE_CACHELINE 1
14796 #define BOUNDARY_MULTI_CACHELINE 2
/*
 * tg3_calc_dma_bndry() - fold DMA read/write burst-boundary settings
 * into the DMA_RWCTRL image @val, based on the host cache-line size,
 * the bus type (plain PCI, PCI-X, or PCI Express) and the architecture.
 * Returns the updated DMA_RWCTRL value.
 *
 * NOTE(review): this listing elides a number of interior lines (the
 * default `goal` initialization, the switch `case` labels and several
 * closing braces); comments below describe only the visible logic.
 */
14798 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
14800 int cacheline_size;
/* PCI_CACHE_LINE_SIZE config byte is in units of 32-bit words. */
14804 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
/* A zero config value presumably maps to a 1024-byte fallback here
 * (the guarding conditional is elided) -- confirm in full source.
 */
14806 cacheline_size = 1024;
14808 cacheline_size = (int) byte * 4;
14810 /* On 5703 and later chips, the boundary bits have no
/* Boundary bits only matter on 5700/5701 non-PCIe parts; everything
 * newer bails out early (the return is elided from this listing).
 */
14813 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14814 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14815 !tg3_flag(tp, PCI_EXPRESS))
/* Per-architecture preference for how bursts should align to the
 * host cache line (PPC64/IA64/PARISC vs. SPARC64/ALPHA).
 */
14818 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
14819 goal = BOUNDARY_MULTI_CACHELINE;
14821 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
14822 goal = BOUNDARY_SINGLE_CACHELINE;
/* 57765+ chips use a single disable bit instead of boundary fields. */
14828 if (tg3_flag(tp, 57765_PLUS)) {
14829 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
14836 /* PCI controllers on most RISC systems tend to disconnect
14837 * when a device tries to burst across a cache-line boundary.
14838 * Therefore, letting tg3 do so just wastes PCI bandwidth.
14840 * Unfortunately, for PCI-E there are only limited
14841 * write-side controls for this, and thus for reads
14842 * we will still get the disconnects. We'll also waste
14843 * these PCI cycles for both read and write for chips
14844 * other than 5700 and 5701 which do not implement the
/* PCI-X: pick from the PCI-X-specific boundary encodings, keyed on
 * cacheline_size (case labels elided from this listing).
 */
14847 if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
14848 switch (cacheline_size) {
14853 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14854 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
14855 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
14857 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14858 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14863 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
14864 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
14868 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14869 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
/* PCI Express: only the write-side boundary is controllable; the
 * disable bit is cleared before selecting a 64- or 128-byte boundary.
 */
14872 } else if (tg3_flag(tp, PCI_EXPRESS)) {
14873 switch (cacheline_size) {
14877 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14878 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14879 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
14885 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14886 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
/* Plain PCI: boundaries scale with the cache-line size, 16 bytes up
 * to 1024 (case labels elided from this listing).
 */
14890 switch (cacheline_size) {
14892 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14893 val |= (DMA_RWCTRL_READ_BNDRY_16 |
14894 DMA_RWCTRL_WRITE_BNDRY_16);
14899 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14900 val |= (DMA_RWCTRL_READ_BNDRY_32 |
14901 DMA_RWCTRL_WRITE_BNDRY_32);
14906 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14907 val |= (DMA_RWCTRL_READ_BNDRY_64 |
14908 DMA_RWCTRL_WRITE_BNDRY_64);
14913 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14914 val |= (DMA_RWCTRL_READ_BNDRY_128 |
14915 DMA_RWCTRL_WRITE_BNDRY_128);
14920 val |= (DMA_RWCTRL_READ_BNDRY_256 |
14921 DMA_RWCTRL_WRITE_BNDRY_256);
14924 val |= (DMA_RWCTRL_READ_BNDRY_512 |
14925 DMA_RWCTRL_WRITE_BNDRY_512);
14929 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
14930 DMA_RWCTRL_WRITE_BNDRY_1024);
/*
 * tg3_do_test_dma() - run a single test DMA transfer through the chip's
 * internal descriptor machinery to probe for host-chipset DMA bugs.
 * @buf/@buf_dma: host-side test buffer and its DMA address.
 * @size: transfer length in bytes.
 * @to_device: direction flag; selects the read-DMA (host->NIC) path
 *             vs. the write-DMA (NIC->host) path -- the conditionals
 *             are elided from this listing, so treat the association
 *             of the two cqid_sqid/MODE blocks below as presumed.
 *
 * The test descriptor is written into NIC SRAM via the PCI memory
 * window, queued to the appropriate FTQ, and the completion FIFO is
 * polled for the descriptor address.
 */
14939 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
14941 struct tg3_internal_buffer_desc test_desc;
14942 u32 sram_dma_descs;
14945 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
/* Quiesce the DMA engines: clear completion FIFOs and status regs. */
14947 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
14948 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
14949 tw32(RDMAC_STATUS, 0);
14950 tw32(WDMAC_STATUS, 0);
14952 tw32(BUFMGR_MODE, 0);
14953 tw32(FTQ_RESET, 0);
/* Build the internal buffer descriptor pointing at the host buffer;
 * 0x2100 is the NIC-internal mbuf address used for the test.
 */
14955 test_desc.addr_hi = ((u64) buf_dma) >> 32;
14956 test_desc.addr_lo = buf_dma & 0xffffffff;
14957 test_desc.nic_mbuf = 0x00002100;
14958 test_desc.len = size;
14961 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
14962 * the *second* time the tg3 driver was getting loaded after an
14965 * Broadcom tells me:
14966 * ...the DMA engine is connected to the GRC block and a DMA
14967 * reset may affect the GRC block in some unpredictable way...
14968 * The behavior of resets to individual blocks has not been tested.
14970 * Broadcom noted the GRC reset will also reset all sub-components.
/* Read-DMA path: completion queue 13 / source queue 2. */
14973 test_desc.cqid_sqid = (13 << 8) | 2;
14975 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
/* Write-DMA path: completion queue 16 / source queue 7. */
14978 test_desc.cqid_sqid = (16 << 8) | 7;
14980 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
14983 test_desc.flags = 0x00000005;
/* Copy the descriptor word-by-word into NIC SRAM through the PCI
 * memory window, then park the window back at offset 0.
 */
14985 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
14988 val = *(((u32 *)&test_desc) + i);
14989 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
14990 sram_dma_descs + (i * sizeof(u32)));
14991 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
14993 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
/* Kick the transfer by enqueueing the descriptor address on the
 * high-priority read or write DMA FTQ.
 */
14996 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
14998 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
/* Poll (up to 40 iterations; delay elided) for the completion FIFO to
 * echo back our descriptor address, signalling DMA completion.
 */
15001 for (i = 0; i < 40; i++) {
15005 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
15007 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
15008 if ((val & 0xffff) == sram_dma_descs) {
15019 #define TEST_BUFFER_SIZE 0x2000
15021 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
15022 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
15026 static int __devinit tg3_test_dma(struct tg3 *tp)
15028 dma_addr_t buf_dma;
15029 u32 *buf, saved_dma_rwctrl;
15032 buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
15033 &buf_dma, GFP_KERNEL);
15039 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
15040 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
15042 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
15044 if (tg3_flag(tp, 57765_PLUS))
15047 if (tg3_flag(tp, PCI_EXPRESS)) {
15048 /* DMA read watermark not used on PCIE */
15049 tp->dma_rwctrl |= 0x00180000;
15050 } else if (!tg3_flag(tp, PCIX_MODE)) {
15051 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
15052 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
15053 tp->dma_rwctrl |= 0x003f0000;
15055 tp->dma_rwctrl |= 0x003f000f;
15057 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
15058 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
15059 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
15060 u32 read_water = 0x7;
15062 /* If the 5704 is behind the EPB bridge, we can
15063 * do the less restrictive ONE_DMA workaround for
15064 * better performance.
15066 if (tg3_flag(tp, 40BIT_DMA_BUG) &&
15067 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
15068 tp->dma_rwctrl |= 0x8000;
15069 else if (ccval == 0x6 || ccval == 0x7)
15070 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
15072 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
15074 /* Set bit 23 to enable PCIX hw bug fix */
15076 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
15077 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
15079 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
15080 /* 5780 always in PCIX mode */
15081 tp->dma_rwctrl |= 0x00144000;
15082 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
15083 /* 5714 always in PCIX mode */
15084 tp->dma_rwctrl |= 0x00148000;
15086 tp->dma_rwctrl |= 0x001b000f;
15090 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
15091 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
15092 tp->dma_rwctrl &= 0xfffffff0;
15094 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
15095 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
15096 /* Remove this if it causes problems for some boards. */
15097 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
15099 /* On 5700/5701 chips, we need to set this bit.
15100 * Otherwise the chip will issue cacheline transactions
15101 * to streamable DMA memory with not all the byte
15102 * enables turned on. This is an error on several
15103 * RISC PCI controllers, in particular sparc64.
15105 * On 5703/5704 chips, this bit has been reassigned
15106 * a different meaning. In particular, it is used
15107 * on those chips to enable a PCI-X workaround.
15109 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
15112 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15115 /* Unneeded, already done by tg3_get_invariants. */
15116 tg3_switch_clocks(tp);
15119 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
15120 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
15123 /* It is best to perform DMA test with maximum write burst size
15124 * to expose the 5700/5701 write DMA bug.
15126 saved_dma_rwctrl = tp->dma_rwctrl;
15127 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15128 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15133 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
15136 /* Send the buffer to the chip. */
15137 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
15139 dev_err(&tp->pdev->dev,
15140 "%s: Buffer write failed. err = %d\n",
15146 /* validate data reached card RAM correctly. */
15147 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
15149 tg3_read_mem(tp, 0x2100 + (i*4), &val);
15150 if (le32_to_cpu(val) != p[i]) {
15151 dev_err(&tp->pdev->dev,
15152 "%s: Buffer corrupted on device! "
15153 "(%d != %d)\n", __func__, val, i);
15154 /* ret = -ENODEV here? */
15159 /* Now read it back. */
15160 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
15162 dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
15163 "err = %d\n", __func__, ret);
15168 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
15172 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
15173 DMA_RWCTRL_WRITE_BNDRY_16) {
15174 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15175 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
15176 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15179 dev_err(&tp->pdev->dev,
15180 "%s: Buffer corrupted on read back! "
15181 "(%d != %d)\n", __func__, p[i], i);
15187 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
15193 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
15194 DMA_RWCTRL_WRITE_BNDRY_16) {
15195 /* DMA test passed without adjusting DMA boundary,
15196 * now look for chipsets that are known to expose the
15197 * DMA bug without failing the test.
15199 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
15200 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15201 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
15203 /* Safe to use the calculated DMA boundary. */
15204 tp->dma_rwctrl = saved_dma_rwctrl;
15207 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15211 dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
/* tg3_init_bufmgr_config - select buffer-manager watermark defaults.
 *
 * Fills tp->bufmgr_config with the mbuf read-DMA / MAC-RX low-water and
 * high-water marks (standard and jumbo variants), chosen by chip family:
 * 57765-class first, then 5705-class (with a 5906-specific override),
 * then the original 570x defaults.  The DMA low/high water marks at the
 * end are common to all families.
 *
 * NOTE(review): some else/closing-brace lines are elided in this excerpt;
 * the family selection is an if / else-if / else chain in the full file.
 */
15216 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
15218 if (tg3_flag(tp, 57765_PLUS)) {
15219 tp->bufmgr_config.mbuf_read_dma_low_water =
15220 DEFAULT_MB_RDMA_LOW_WATER_5705;
15221 tp->bufmgr_config.mbuf_mac_rx_low_water =
15222 DEFAULT_MB_MACRX_LOW_WATER_57765;
15223 tp->bufmgr_config.mbuf_high_water =
15224 DEFAULT_MB_HIGH_WATER_57765;
15226 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15227 DEFAULT_MB_RDMA_LOW_WATER_5705;
15228 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15229 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
15230 tp->bufmgr_config.mbuf_high_water_jumbo =
15231 DEFAULT_MB_HIGH_WATER_JUMBO_57765;
15232 } else if (tg3_flag(tp, 5705_PLUS)) {
15233 tp->bufmgr_config.mbuf_read_dma_low_water =
15234 DEFAULT_MB_RDMA_LOW_WATER_5705;
15235 tp->bufmgr_config.mbuf_mac_rx_low_water =
15236 DEFAULT_MB_MACRX_LOW_WATER_5705;
15237 tp->bufmgr_config.mbuf_high_water =
15238 DEFAULT_MB_HIGH_WATER_5705;
/* 5906 uses shallower MAC-RX / high-water marks than other 5705-class parts. */
15239 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
15240 tp->bufmgr_config.mbuf_mac_rx_low_water =
15241 DEFAULT_MB_MACRX_LOW_WATER_5906;
15242 tp->bufmgr_config.mbuf_high_water =
15243 DEFAULT_MB_HIGH_WATER_5906;
15246 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15247 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
15248 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15249 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
15250 tp->bufmgr_config.mbuf_high_water_jumbo =
15251 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
15253 tp->bufmgr_config.mbuf_read_dma_low_water =
15254 DEFAULT_MB_RDMA_LOW_WATER;
15255 tp->bufmgr_config.mbuf_mac_rx_low_water =
15256 DEFAULT_MB_MACRX_LOW_WATER;
15257 tp->bufmgr_config.mbuf_high_water =
15258 DEFAULT_MB_HIGH_WATER;
15260 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15261 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
15262 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15263 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
15264 tp->bufmgr_config.mbuf_high_water_jumbo =
15265 DEFAULT_MB_HIGH_WATER_JUMBO;
/* DMA descriptor watermarks are identical for every chip family. */
15268 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
15269 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
/* tg3_phy_string - map the masked PHY ID to a human-readable name.
 *
 * Returns a static string used only for the probe-time log message; a
 * masked ID of 0 means an external serdes, anything unrecognized reports
 * "unknown".
 */
15272 static char * __devinit tg3_phy_string(struct tg3 *tp)
15274 switch (tp->phy_id & TG3_PHY_ID_MASK) {
15275 case TG3_PHY_ID_BCM5400: return "5400";
15276 case TG3_PHY_ID_BCM5401: return "5401";
15277 case TG3_PHY_ID_BCM5411: return "5411";
15278 case TG3_PHY_ID_BCM5701: return "5701";
15279 case TG3_PHY_ID_BCM5703: return "5703";
15280 case TG3_PHY_ID_BCM5704: return "5704";
15281 case TG3_PHY_ID_BCM5705: return "5705";
15282 case TG3_PHY_ID_BCM5750: return "5750";
15283 case TG3_PHY_ID_BCM5752: return "5752";
15284 case TG3_PHY_ID_BCM5714: return "5714";
15285 case TG3_PHY_ID_BCM5780: return "5780";
15286 case TG3_PHY_ID_BCM5755: return "5755";
15287 case TG3_PHY_ID_BCM5787: return "5787";
15288 case TG3_PHY_ID_BCM5784: return "5784";
15289 case TG3_PHY_ID_BCM5756: return "5722/5756";
15290 case TG3_PHY_ID_BCM5906: return "5906";
15291 case TG3_PHY_ID_BCM5761: return "5761";
15292 case TG3_PHY_ID_BCM5718C: return "5718C";
15293 case TG3_PHY_ID_BCM5718S: return "5718S";
15294 case TG3_PHY_ID_BCM57765: return "57765";
15295 case TG3_PHY_ID_BCM5719C: return "5719C";
15296 case TG3_PHY_ID_BCM5720C: return "5720C";
15297 case TG3_PHY_ID_BCM8002: return "8002/serdes";
15298 case 0: return "serdes";
15299 default: return "unknown";
/* tg3_bus_string - format the bus type/speed/width into @str.
 *
 * Builds a description such as "PCIX:133MHz:64-bit" for the probe-time
 * banner.  PCI Express devices get a fixed label; PCI-X speed is decoded
 * from the low 5 bits of TG3PCI_CLOCK_CTRL (with a board-ID special case
 * for the 5704 CIOBE); plain PCI speed/width come from driver flags.
 * Caller supplies @str and must size it for the longest result.
 */
15303 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
15305 if (tg3_flag(tp, PCI_EXPRESS)) {
15306 strcpy(str, "PCI Express");
15308 } else if (tg3_flag(tp, PCIX_MODE)) {
15309 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
15311 strcpy(str, "PCIX:");
15313 if ((clock_ctrl == 7) ||
15314 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
15315 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
15316 strcat(str, "133MHz");
15317 else if (clock_ctrl == 0)
15318 strcat(str, "33MHz");
15319 else if (clock_ctrl == 2)
15320 strcat(str, "50MHz");
15321 else if (clock_ctrl == 4)
15322 strcat(str, "66MHz");
15323 else if (clock_ctrl == 6)
15324 strcat(str, "100MHz");
15326 strcpy(str, "PCI:");
15327 if (tg3_flag(tp, PCI_HIGH_SPEED))
15328 strcat(str, "66MHz");
15330 strcat(str, "33MHz");
15332 if (tg3_flag(tp, PCI_32BIT))
15333 strcat(str, ":32-bit");
15335 strcat(str, ":64-bit");
/* tg3_find_peer - locate the sibling function of a dual-port device.
 *
 * Scans the other functions in the same PCI slot (devfn with the low 3
 * bits masked off) for a device other than tp->pdev.  Per the comments
 * below, a single-port 5704 falls back to tp->pdev itself, and the
 * elevated refcount from pci_get_slot() is intentionally not kept.
 * NOTE(review): the loop body and return path are elided in this excerpt.
 */
15339 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
15341 struct pci_dev *peer;
15342 unsigned int func, devnr = tp->pdev->devfn & ~7;
15344 for (func = 0; func < 8; func++) {
15345 peer = pci_get_slot(tp->pdev->bus, devnr | func);
15346 if (peer && peer != tp->pdev)
15350 /* 5704 can be configured in single-port mode, set peer to
15351 * tp->pdev in that case.
15359 * We don't need to keep the refcount elevated; there's no way
15360 * to remove one half of this device without removing the other
/* tg3_init_coal - initialize default interrupt-coalescing parameters.
 *
 * Zeroes tp->coal and installs the driver's "low" tick/frame defaults.
 * If the host coalescing mode clears ticks on BD events, the CLRTCKS
 * variants are used instead; 5705-and-newer chips do not support the
 * per-IRQ and statistics coalescing fields, so those are forced to 0.
 */
15367 static void __devinit tg3_init_coal(struct tg3 *tp)
15369 struct ethtool_coalesce *ec = &tp->coal;
15371 memset(ec, 0, sizeof(*ec));
15372 ec->cmd = ETHTOOL_GCOALESCE;
15373 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
15374 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
15375 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
15376 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
15377 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
15378 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
15379 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
15380 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
15381 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
/* Chips that clear ticks on BD writes need the CLRTCKS tick values. */
15383 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
15384 HOSTCC_MODE_CLRTICK_TXBD)) {
15385 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
15386 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
15387 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
15388 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
/* 5705+ hardware lacks these knobs; advertise them as zero. */
15391 if (tg3_flag(tp, 5705_PLUS)) {
15392 ec->rx_coalesce_usecs_irq = 0;
15393 ec->tx_coalesce_usecs_irq = 0;
15394 ec->stats_block_coalesce_usecs = 0;
/* Netdevice callbacks for the tg3 driver; wired into dev->netdev_ops
 * in tg3_init_one().  ndo_poll_controller is only built for netpoll
 * (netconsole) kernels.
 */
15398 static const struct net_device_ops tg3_netdev_ops = {
15399 .ndo_open = tg3_open,
15400 .ndo_stop = tg3_close,
15401 .ndo_start_xmit = tg3_start_xmit,
15402 .ndo_get_stats64 = tg3_get_stats64,
15403 .ndo_validate_addr = eth_validate_addr,
15404 .ndo_set_rx_mode = tg3_set_rx_mode,
15405 .ndo_set_mac_address = tg3_set_mac_addr,
15406 .ndo_do_ioctl = tg3_ioctl,
15407 .ndo_tx_timeout = tg3_tx_timeout,
15408 .ndo_change_mtu = tg3_change_mtu,
15409 .ndo_fix_features = tg3_fix_features,
15410 .ndo_set_features = tg3_set_features,
15411 #ifdef CONFIG_NET_POLL_CONTROLLER
15412 .ndo_poll_controller = tg3_poll_controller,
/* tg3_init_one - PCI probe entry point.
 *
 * Brings up one Tigon3 device end to end: enables and maps the PCI
 * device, allocates the net_device, reads chip invariants, negotiates
 * DMA masks, configures offload features, runs the DMA self-test, sets
 * up per-vector NAPI mailboxes, registers the netdev, and logs the
 * probe banner.  Errors unwind through the err_out_* labels in reverse
 * acquisition order.
 *
 * NOTE(review): many error-check bodies, else branches, and closing
 * braces are elided in this excerpt; the goto targets below show the
 * cleanup ordering but not every line of the original function.
 */
15416 static int __devinit tg3_init_one(struct pci_dev *pdev,
15417 const struct pci_device_id *ent)
15419 struct net_device *dev;
15421 int i, err, pm_cap;
15422 u32 sndmbx, rcvmbx, intmbx;
15424 u64 dma_mask, persist_dma_mask;
15427 printk_once(KERN_INFO "%s\n", version);
15429 err = pci_enable_device(pdev);
15431 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
15435 err = pci_request_regions(pdev, DRV_MODULE_NAME);
15437 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
15438 goto err_out_disable_pdev;
15441 pci_set_master(pdev);
15443 /* Find power-management capability. */
15444 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
15446 dev_err(&pdev->dev,
15447 "Cannot find Power Management capability, aborting\n");
15449 goto err_out_free_res;
15452 err = pci_set_power_state(pdev, PCI_D0);
15454 dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
15455 goto err_out_free_res;
/* Multiqueue netdev: one queue per possible MSI-X vector. */
15458 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
15460 dev_err(&pdev->dev, "Etherdev alloc failed, aborting\n");
15462 goto err_out_power_down;
15465 SET_NETDEV_DEV(dev, &pdev->dev);
15467 tp = netdev_priv(dev);
15470 tp->pm_cap = pm_cap;
15471 tp->rx_mode = TG3_DEF_RX_MODE;
15472 tp->tx_mode = TG3_DEF_TX_MODE;
15476 tp->msg_enable = tg3_debug;
15478 tp->msg_enable = TG3_DEF_MSG_ENABLE;
15480 /* The word/byte swap controls here control register access byte
15481 * swapping. DMA data byte swapping is controlled in the GRC_MODE
15484 tp->misc_host_ctrl =
15485 MISC_HOST_CTRL_MASK_PCI_INT |
15486 MISC_HOST_CTRL_WORD_SWAP |
15487 MISC_HOST_CTRL_INDIR_ACCESS |
15488 MISC_HOST_CTRL_PCISTATE_RW;
15490 /* The NONFRM (non-frame) byte/word swap controls take effect
15491 * on descriptor entries, anything which isn't packet data.
15493 * The StrongARM chips on the board (one for tx, one for rx)
15494 * are running in big-endian mode.
15496 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
15497 GRC_MODE_WSWAP_NONFRM_DATA);
15498 #ifdef __BIG_ENDIAN
15499 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
15501 spin_lock_init(&tp->lock);
15502 spin_lock_init(&tp->indirect_lock);
15503 INIT_WORK(&tp->reset_task, tg3_reset_task);
15505 tp->regs = pci_ioremap_bar(pdev, BAR_0);
15507 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
15509 goto err_out_free_dev;
/* Devices with an on-chip APE need its register window (BAR 2) too. */
15512 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
15513 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
15514 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
15515 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
15516 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15517 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
15518 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
15519 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) {
15520 tg3_flag_set(tp, ENABLE_APE);
15521 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
15522 if (!tp->aperegs) {
15523 dev_err(&pdev->dev,
15524 "Cannot map APE registers, aborting\n");
15526 goto err_out_iounmap;
15530 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
15531 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
15533 dev->ethtool_ops = &tg3_ethtool_ops;
15534 dev->watchdog_timeo = TG3_TX_TIMEOUT;
15535 dev->netdev_ops = &tg3_netdev_ops;
15536 dev->irq = pdev->irq;
15538 err = tg3_get_invariants(tp);
15540 dev_err(&pdev->dev,
15541 "Problem fetching invariants of chip, aborting\n");
15542 goto err_out_apeunmap;
15545 /* The EPB bridge inside 5714, 5715, and 5780 and any
15546 * device behind the EPB cannot support DMA addresses > 40-bit.
15547 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
15548 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
15549 * do DMA address check in tg3_start_xmit().
15551 if (tg3_flag(tp, IS_5788))
15552 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
15553 else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
15554 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
15555 #ifdef CONFIG_HIGHMEM
15556 dma_mask = DMA_BIT_MASK(64);
15559 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
15561 /* Configure DMA attributes. */
15562 if (dma_mask > DMA_BIT_MASK(32)) {
15563 err = pci_set_dma_mask(pdev, dma_mask);
15565 features |= NETIF_F_HIGHDMA;
15566 err = pci_set_consistent_dma_mask(pdev,
15569 dev_err(&pdev->dev, "Unable to obtain 64 bit "
15570 "DMA for consistent allocations\n");
15571 goto err_out_apeunmap;
/* Fall back to a 32-bit mask when the wide mask was refused. */
15575 if (err || dma_mask == DMA_BIT_MASK(32)) {
15576 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
15578 dev_err(&pdev->dev,
15579 "No usable DMA configuration, aborting\n");
15580 goto err_out_apeunmap;
15584 tg3_init_bufmgr_config(tp);
15586 features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
15588 /* 5700 B0 chips do not support checksumming correctly due
15589 * to hardware bugs.
15591 if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
15592 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
15594 if (tg3_flag(tp, 5755_PLUS))
15595 features |= NETIF_F_IPV6_CSUM;
15598 /* TSO is on by default on chips that support hardware TSO.
15599 * Firmware TSO on older chips gives lower performance, so it
15600 * is off by default, but can be enabled using ethtool.
15602 if ((tg3_flag(tp, HW_TSO_1) ||
15603 tg3_flag(tp, HW_TSO_2) ||
15604 tg3_flag(tp, HW_TSO_3)) &&
15605 (features & NETIF_F_IP_CSUM))
15606 features |= NETIF_F_TSO;
15607 if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
15608 if (features & NETIF_F_IPV6_CSUM)
15609 features |= NETIF_F_TSO6;
15610 if (tg3_flag(tp, HW_TSO_3) ||
15611 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
15612 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
15613 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
15614 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
15615 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
15616 features |= NETIF_F_TSO_ECN;
15619 dev->features |= features;
15620 dev->vlan_features |= features;
15623 * Add loopback capability only for a subset of devices that support
15624 * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
15625 * loopback for the remaining devices.
15627 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
15628 !tg3_flag(tp, CPMU_PRESENT))
15629 /* Add the loopback capability */
15630 features |= NETIF_F_LOOPBACK;
15632 dev->hw_features |= features;
/* Slow-bus 5705 A1 without TSO is limited to a 64-entry RX ring. */
15634 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
15635 !tg3_flag(tp, TSO_CAPABLE) &&
15636 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
15637 tg3_flag_set(tp, MAX_RXPEND_64);
15638 tp->rx_pending = 63;
15641 err = tg3_get_device_address(tp);
15643 dev_err(&pdev->dev,
15644 "Could not obtain valid ethernet address, aborting\n");
15645 goto err_out_apeunmap;
15649 * Reset chip in case UNDI or EFI driver did not shutdown
15650 * DMA self test will enable WDMAC and we'll see (spurious)
15651 * pending DMA on the PCI bus at that point.
15653 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
15654 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
15655 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
15656 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15659 err = tg3_test_dma(tp);
15661 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
15662 goto err_out_apeunmap;
/* Assign interrupt/producer/consumer mailboxes to each NAPI vector. */
15665 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
15666 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
15667 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
15668 for (i = 0; i < tp->irq_max; i++) {
15669 struct tg3_napi *tnapi = &tp->napi[i];
15672 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
15674 tnapi->int_mbox = intmbx;
15680 tnapi->consmbox = rcvmbx;
15681 tnapi->prodmbox = sndmbx;
15684 tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
15686 tnapi->coal_now = HOSTCC_MODE_NOW;
15688 if (!tg3_flag(tp, SUPPORT_MSIX))
15692 * If we support MSIX, we'll be using RSS. If we're using
15693 * RSS, the first vector only handles link interrupts and the
15694 * remaining vectors handle rx and tx interrupts. Reuse the
15695 * mailbox values for the next iteration. The values we setup
15696 * above are still useful for the single vectored mode.
15711 pci_set_drvdata(pdev, dev);
15713 if (tg3_flag(tp, 5717_PLUS)) {
15714 /* Resume a low-power mode */
15715 tg3_frob_aux_power(tp, false);
15718 err = register_netdev(dev);
15720 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
15721 goto err_out_apeunmap;
15724 netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
15725 tp->board_part_number,
15726 tp->pci_chip_rev_id,
15727 tg3_bus_string(tp, str),
15730 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
15731 struct phy_device *phydev;
15732 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
15734 "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
15735 phydev->drv->name, dev_name(&phydev->dev));
15739 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
15740 ethtype = "10/100Base-TX";
15741 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
15742 ethtype = "1000Base-SX";
15744 ethtype = "10/100/1000Base-T";
15746 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
15747 "(WireSpeed[%d], EEE[%d])\n",
15748 tg3_phy_string(tp), ethtype,
15749 (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
15750 (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
15753 netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
15754 (dev->features & NETIF_F_RXCSUM) != 0,
15755 tg3_flag(tp, USE_LINKCHG_REG) != 0,
15756 (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
15757 tg3_flag(tp, ENABLE_ASF) != 0,
15758 tg3_flag(tp, TSO_CAPABLE) != 0);
15759 netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
15761 pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
15762 ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
15764 pci_save_state(pdev);
/* Error unwind: release resources in reverse order of acquisition. */
15770 iounmap(tp->aperegs);
15771 tp->aperegs = NULL;
15783 err_out_power_down:
15784 pci_set_power_state(pdev, PCI_D3hot);
15787 pci_release_regions(pdev);
15789 err_out_disable_pdev:
15790 pci_disable_device(pdev);
15791 pci_set_drvdata(pdev, NULL);
/* tg3_remove_one - PCI remove entry point.
 *
 * Tears down one device: releases any loaded firmware, cancels the
 * pending reset task, unregisters the netdev, unmaps the APE window,
 * and releases/disables the PCI device.  NOTE(review): some cleanup
 * lines (phylib teardown body, regs iounmap, free_netdev) are elided
 * in this excerpt.
 */
15795 static void __devexit tg3_remove_one(struct pci_dev *pdev)
15797 struct net_device *dev = pci_get_drvdata(pdev);
15800 struct tg3 *tp = netdev_priv(dev);
15803 release_firmware(tp->fw);
15805 tg3_reset_task_cancel(tp);
15807 if (tg3_flag(tp, USE_PHYLIB)) {
15812 unregister_netdev(dev);
15814 iounmap(tp->aperegs);
15815 tp->aperegs = NULL;
15822 pci_release_regions(pdev);
15823 pci_disable_device(pdev);
15824 pci_set_drvdata(pdev, NULL);
15828 #ifdef CONFIG_PM_SLEEP
/* tg3_suspend - dev_pm_ops suspend callback.
 *
 * If the interface is running: cancel the reset task, stop NAPI/queues,
 * kill the timer, disable interrupts, detach the netdev, halt the chip,
 * and prepare it for low power.  The trailing restart sequence is the
 * error-recovery path taken when tg3_power_down_prepare() fails — it
 * brings the hardware and timer back so the device keeps working.
 * NOTE(review): the early-return for a stopped interface and the error
 * branch structure are partially elided in this excerpt.
 */
15829 static int tg3_suspend(struct device *device)
15831 struct pci_dev *pdev = to_pci_dev(device);
15832 struct net_device *dev = pci_get_drvdata(pdev);
15833 struct tg3 *tp = netdev_priv(dev);
15836 if (!netif_running(dev))
15839 tg3_reset_task_cancel(tp);
15841 tg3_netif_stop(tp);
15843 del_timer_sync(&tp->timer);
15845 tg3_full_lock(tp, 1);
15846 tg3_disable_ints(tp);
15847 tg3_full_unlock(tp);
15849 netif_device_detach(dev);
15851 tg3_full_lock(tp, 0);
15852 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15853 tg3_flag_clear(tp, INIT_COMPLETE);
15854 tg3_full_unlock(tp);
15856 err = tg3_power_down_prepare(tp);
/* Failure path: restore hardware state so the device stays usable. */
15860 tg3_full_lock(tp, 0);
15862 tg3_flag_set(tp, INIT_COMPLETE);
15863 err2 = tg3_restart_hw(tp, 1);
15867 tp->timer.expires = jiffies + tp->timer_offset;
15868 add_timer(&tp->timer);
15870 netif_device_attach(dev);
15871 tg3_netif_start(tp);
15874 tg3_full_unlock(tp);
/* tg3_resume - dev_pm_ops resume callback.
 *
 * No-op when the interface was not running.  Otherwise: reattach the
 * netdev, restart the hardware under the full lock, rearm the periodic
 * timer, and restart NAPI/queues.  NOTE(review): the early return and
 * the error-branch brace structure are elided in this excerpt.
 */
15883 static int tg3_resume(struct device *device)
15885 struct pci_dev *pdev = to_pci_dev(device);
15886 struct net_device *dev = pci_get_drvdata(pdev);
15887 struct tg3 *tp = netdev_priv(dev);
15890 if (!netif_running(dev))
15893 netif_device_attach(dev);
15895 tg3_full_lock(tp, 0);
15897 tg3_flag_set(tp, INIT_COMPLETE);
15898 err = tg3_restart_hw(tp, 1);
15902 tp->timer.expires = jiffies + tp->timer_offset;
15903 add_timer(&tp->timer);
15905 tg3_netif_start(tp);
15908 tg3_full_unlock(tp);
15916 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
15917 #define TG3_PM_OPS (&tg3_pm_ops)
15921 #define TG3_PM_OPS NULL
15923 #endif /* CONFIG_PM_SLEEP */
15926 * tg3_io_error_detected - called when PCI error is detected
15927 * @pdev: Pointer to PCI device
15928 * @state: The current pci connection state
15930 * This function is called after a PCI bus error affecting
15931 * this device has been detected.
/* Returns PCI_ERS_RESULT_NEED_RESET normally, or _DISCONNECT when the
 * channel state reports a permanent failure.
 */
15933 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
15934 pci_channel_state_t state)
15936 struct net_device *netdev = pci_get_drvdata(pdev);
15937 struct tg3 *tp = netdev_priv(netdev);
15938 pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
15940 netdev_info(netdev, "PCI I/O error detected\n");
15944 if (!netif_running(netdev))
15949 tg3_netif_stop(tp);
15951 del_timer_sync(&tp->timer);
15953 /* Want to make sure that the reset task doesn't run */
15954 tg3_reset_task_cancel(tp);
15955 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
15957 netif_device_detach(netdev);
15959 /* Clean up software state, even if MMIO is blocked */
15960 tg3_full_lock(tp, 0);
15961 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
15962 tg3_full_unlock(tp);
15965 if (state == pci_channel_io_perm_failure)
15966 err = PCI_ERS_RESULT_DISCONNECT;
15968 pci_disable_device(pdev);
15976 * tg3_io_slot_reset - called after the pci bus has been reset.
15977 * @pdev: Pointer to PCI device
15979 * Restart the card from scratch, as if from a cold-boot.
15980 * At this point, the card has experienced a hard reset,
15981 * followed by fixups by BIOS, and has its config space
15982 * set up identically to what it was at cold boot.
/* Returns PCI_ERS_RESULT_RECOVERED on success, _DISCONNECT otherwise. */
15984 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
15986 struct net_device *netdev = pci_get_drvdata(pdev);
15987 struct tg3 *tp = netdev_priv(netdev);
15988 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
15993 if (pci_enable_device(pdev)) {
15994 netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
15998 pci_set_master(pdev);
/* Restore the config space saved at probe, then re-save it fresh. */
15999 pci_restore_state(pdev);
16000 pci_save_state(pdev);
16002 if (!netif_running(netdev)) {
16003 rc = PCI_ERS_RESULT_RECOVERED;
16007 err = tg3_power_up(tp);
16011 rc = PCI_ERS_RESULT_RECOVERED;
16020 * tg3_io_resume - called when traffic can start flowing again.
16021 * @pdev: Pointer to PCI device
16023 * This callback is called when the error recovery driver tells
16024 * us that its OK to resume normal operation.
/* Restarts the hardware, reattaches the netdev, rearms the timer, and
 * restarts NAPI/queues; logs and bails if the restart fails.
 */
16026 static void tg3_io_resume(struct pci_dev *pdev)
16028 struct net_device *netdev = pci_get_drvdata(pdev);
16029 struct tg3 *tp = netdev_priv(netdev);
16034 if (!netif_running(netdev))
16037 tg3_full_lock(tp, 0);
16038 tg3_flag_set(tp, INIT_COMPLETE);
16039 err = tg3_restart_hw(tp, 1);
16040 tg3_full_unlock(tp);
16042 netdev_err(netdev, "Cannot restart hardware after reset.\n");
16046 netif_device_attach(netdev);
16048 tp->timer.expires = jiffies + tp->timer_offset;
16049 add_timer(&tp->timer);
16051 tg3_netif_start(tp);
/* PCI error-recovery callbacks (AER): detect -> slot reset -> resume. */
16059 static struct pci_error_handlers tg3_err_handler = {
16060 .error_detected = tg3_io_error_detected,
16061 .slot_reset = tg3_io_slot_reset,
16062 .resume = tg3_io_resume
/* PCI driver descriptor tying probe/remove, error handling, and PM ops
 * together; registered from tg3_init().
 */
16065 static struct pci_driver tg3_driver = {
16066 .name = DRV_MODULE_NAME,
16067 .id_table = tg3_pci_tbl,
16068 .probe = tg3_init_one,
16069 .remove = __devexit_p(tg3_remove_one),
16070 .err_handler = &tg3_err_handler,
16071 .driver.pm = TG3_PM_OPS,
/* Module entry/exit: register and unregister the PCI driver. */
16074 static int __init tg3_init(void)
16076 return pci_register_driver(&tg3_driver);
16079 static void __exit tg3_cleanup(void)
16081 pci_unregister_driver(&tg3_driver);
16084 module_init(tg3_init);
16085 module_exit(tg3_cleanup);