2 * tg3.c: Broadcom Tigon3 ethernet driver.
4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6 * Copyright (C) 2004 Sun Microsystems Inc.
7 * Copyright (C) 2005-2011 Broadcom Corporation.
10 * Derived from proprietary unpublished source code,
11 * Copyright (C) 2000-2003 Broadcom Corporation.
13 * Permission is hereby granted for the distribution of this firmware
14 * data in hexadecimal or equivalent format, provided this copyright
15 * notice is accompanying it.
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/stringify.h>
22 #include <linux/kernel.h>
23 #include <linux/types.h>
24 #include <linux/compiler.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
28 #include <linux/init.h>
29 #include <linux/interrupt.h>
30 #include <linux/ioport.h>
31 #include <linux/pci.h>
32 #include <linux/netdevice.h>
33 #include <linux/etherdevice.h>
34 #include <linux/skbuff.h>
35 #include <linux/ethtool.h>
36 #include <linux/mdio.h>
37 #include <linux/mii.h>
38 #include <linux/phy.h>
39 #include <linux/brcmphy.h>
40 #include <linux/if_vlan.h>
42 #include <linux/tcp.h>
43 #include <linux/workqueue.h>
44 #include <linux/prefetch.h>
45 #include <linux/dma-mapping.h>
46 #include <linux/firmware.h>
48 #include <net/checksum.h>
51 #include <asm/system.h>
53 #include <asm/byteorder.h>
54 #include <linux/uaccess.h>
57 #include <asm/idprom.h>
66 /* Functions & macros to verify TG3_FLAGS types */
68 static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
70 return test_bit(flag, bits);
73 static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
78 static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
80 clear_bit(flag, bits);
83 #define tg3_flag(tp, flag) \
84 _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
85 #define tg3_flag_set(tp, flag) \
86 _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
87 #define tg3_flag_clear(tp, flag) \
88 _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
90 #define DRV_MODULE_NAME "tg3"
92 #define TG3_MIN_NUM 120
93 #define DRV_MODULE_VERSION \
94 __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
95 #define DRV_MODULE_RELDATE "August 18, 2011"
97 #define TG3_DEF_RX_MODE 0
98 #define TG3_DEF_TX_MODE 0
99 #define TG3_DEF_MSG_ENABLE \
109 #define TG3_GRC_LCLCTL_PWRSW_DELAY 100
111 /* length of time before we decide the hardware is borked,
112 * and dev->tx_timeout() should be called to fix the problem
115 #define TG3_TX_TIMEOUT (5 * HZ)
117 /* hardware minimum and maximum for a single frame's data payload */
118 #define TG3_MIN_MTU 60
119 #define TG3_MAX_MTU(tp) \
120 (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
122 /* These numbers seem to be hard coded in the NIC firmware somehow.
123 * You can't change the ring sizes, but you can change where you place
124 * them in the NIC onboard memory.
126 #define TG3_RX_STD_RING_SIZE(tp) \
127 (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
128 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
129 #define TG3_DEF_RX_RING_PENDING 200
130 #define TG3_RX_JMB_RING_SIZE(tp) \
131 (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
132 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
133 #define TG3_DEF_RX_JUMBO_RING_PENDING 100
134 #define TG3_RSS_INDIR_TBL_SIZE 128
136 /* Do not place this n-ring entries value into the tp struct itself,
137 * we really want to expose these constants to GCC so that modulo et
138 * al. operations are done with shifts and masks instead of with
139 * hw multiply/modulo instructions. Another solution would be to
140 * replace things like '% foo' with '& (foo - 1)'.
143 #define TG3_TX_RING_SIZE 512
144 #define TG3_DEF_TX_RING_PENDING (TG3_TX_RING_SIZE - 1)
146 #define TG3_RX_STD_RING_BYTES(tp) \
147 (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
148 #define TG3_RX_JMB_RING_BYTES(tp) \
149 (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
150 #define TG3_RX_RCB_RING_BYTES(tp) \
151 (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
152 #define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \
154 #define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
156 #define TG3_DMA_BYTE_ENAB 64
158 #define TG3_RX_STD_DMA_SZ 1536
159 #define TG3_RX_JMB_DMA_SZ 9046
161 #define TG3_RX_DMA_TO_MAP_SZ(x) ((x) + TG3_DMA_BYTE_ENAB)
163 #define TG3_RX_STD_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
164 #define TG3_RX_JMB_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
166 #define TG3_RX_STD_BUFF_RING_SIZE(tp) \
167 (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))
169 #define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
170 (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
172 /* Due to a hardware bug, the 5701 can only DMA to memory addresses
173 * that are at least dword aligned when used in PCIX mode. The driver
174 * works around this bug by double copying the packet. This workaround
175 * is built into the normal double copy length check for efficiency.
177 * However, the double copy is only necessary on those architectures
178 * where unaligned memory accesses are inefficient. For those architectures
179 * where unaligned memory accesses incur little penalty, we can reintegrate
180 * the 5701 in the normal rx path. Doing so saves a device structure
181 * dereference by hardcoding the double copy threshold in place.
183 #define TG3_RX_COPY_THRESHOLD 256
184 #if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
185 #define TG3_RX_COPY_THRESH(tp) TG3_RX_COPY_THRESHOLD
187 #define TG3_RX_COPY_THRESH(tp) ((tp)->rx_copy_thresh)
190 #if (NET_IP_ALIGN != 0)
191 #define TG3_RX_OFFSET(tp) ((tp)->rx_offset)
193 #define TG3_RX_OFFSET(tp) 0
196 /* minimum number of free TX descriptors required to wake up TX process */
197 #define TG3_TX_WAKEUP_THRESH(tnapi) ((tnapi)->tx_pending / 4)
198 #define TG3_TX_BD_DMA_MAX 4096
200 #define TG3_RAW_IP_ALIGN 2
202 #define TG3_FW_UPDATE_TIMEOUT_SEC 5
204 #define FIRMWARE_TG3 "tigon/tg3.bin"
205 #define FIRMWARE_TG3TSO "tigon/tg3_tso.bin"
206 #define FIRMWARE_TG3TSO5 "tigon/tg3_tso5.bin"
208 static char version[] __devinitdata =
209 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
211 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
212 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
213 MODULE_LICENSE("GPL");
214 MODULE_VERSION(DRV_MODULE_VERSION);
215 MODULE_FIRMWARE(FIRMWARE_TG3);
216 MODULE_FIRMWARE(FIRMWARE_TG3TSO);
217 MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
219 static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
220 module_param(tg3_debug, int, 0);
221 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
223 static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
224 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
225 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
226 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
227 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
228 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
229 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
230 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
231 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
232 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
233 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
234 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
235 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
236 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
237 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
238 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
239 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
240 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
241 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
242 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
243 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
244 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
245 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
246 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
247 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
248 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
249 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
250 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
251 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
252 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
253 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
254 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
255 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
256 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
257 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
258 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
259 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
260 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
261 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
262 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
263 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
264 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
265 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
266 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
267 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
268 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
269 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
270 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
271 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
272 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
273 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
274 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
275 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
276 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
277 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
278 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
279 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
280 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
281 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
282 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
283 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
284 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
285 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
286 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
287 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
288 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
289 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
290 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
291 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
292 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
293 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
294 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
295 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
296 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
297 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
298 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
299 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
300 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
301 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
302 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
303 {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
304 {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
308 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
310 static const struct {
311 const char string[ETH_GSTRING_LEN];
312 } ethtool_stats_keys[] = {
315 { "rx_ucast_packets" },
316 { "rx_mcast_packets" },
317 { "rx_bcast_packets" },
319 { "rx_align_errors" },
320 { "rx_xon_pause_rcvd" },
321 { "rx_xoff_pause_rcvd" },
322 { "rx_mac_ctrl_rcvd" },
323 { "rx_xoff_entered" },
324 { "rx_frame_too_long_errors" },
326 { "rx_undersize_packets" },
327 { "rx_in_length_errors" },
328 { "rx_out_length_errors" },
329 { "rx_64_or_less_octet_packets" },
330 { "rx_65_to_127_octet_packets" },
331 { "rx_128_to_255_octet_packets" },
332 { "rx_256_to_511_octet_packets" },
333 { "rx_512_to_1023_octet_packets" },
334 { "rx_1024_to_1522_octet_packets" },
335 { "rx_1523_to_2047_octet_packets" },
336 { "rx_2048_to_4095_octet_packets" },
337 { "rx_4096_to_8191_octet_packets" },
338 { "rx_8192_to_9022_octet_packets" },
345 { "tx_flow_control" },
347 { "tx_single_collisions" },
348 { "tx_mult_collisions" },
350 { "tx_excessive_collisions" },
351 { "tx_late_collisions" },
352 { "tx_collide_2times" },
353 { "tx_collide_3times" },
354 { "tx_collide_4times" },
355 { "tx_collide_5times" },
356 { "tx_collide_6times" },
357 { "tx_collide_7times" },
358 { "tx_collide_8times" },
359 { "tx_collide_9times" },
360 { "tx_collide_10times" },
361 { "tx_collide_11times" },
362 { "tx_collide_12times" },
363 { "tx_collide_13times" },
364 { "tx_collide_14times" },
365 { "tx_collide_15times" },
366 { "tx_ucast_packets" },
367 { "tx_mcast_packets" },
368 { "tx_bcast_packets" },
369 { "tx_carrier_sense_errors" },
373 { "dma_writeq_full" },
374 { "dma_write_prioq_full" },
378 { "rx_threshold_hit" },
380 { "dma_readq_full" },
381 { "dma_read_prioq_full" },
382 { "tx_comp_queue_full" },
384 { "ring_set_send_prod_index" },
385 { "ring_status_update" },
387 { "nic_avoided_irqs" },
388 { "nic_tx_threshold_hit" },
390 { "mbuf_lwm_thresh_hit" },
393 #define TG3_NUM_STATS ARRAY_SIZE(ethtool_stats_keys)
396 static const struct {
397 const char string[ETH_GSTRING_LEN];
398 } ethtool_test_keys[] = {
399 { "nvram test (online) " },
400 { "link test (online) " },
401 { "register test (offline)" },
402 { "memory test (offline)" },
403 { "mac loopback test (offline)" },
404 { "phy loopback test (offline)" },
405 { "ext loopback test (offline)" },
406 { "interrupt test (offline)" },
409 #define TG3_NUM_TEST ARRAY_SIZE(ethtool_test_keys)
412 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
414 writel(val, tp->regs + off);
417 static u32 tg3_read32(struct tg3 *tp, u32 off)
419 return readl(tp->regs + off);
422 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
424 writel(val, tp->aperegs + off);
427 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
429 return readl(tp->aperegs + off);
432 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
436 spin_lock_irqsave(&tp->indirect_lock, flags);
437 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
438 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
439 spin_unlock_irqrestore(&tp->indirect_lock, flags);
442 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
444 writel(val, tp->regs + off);
445 readl(tp->regs + off);
448 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
453 spin_lock_irqsave(&tp->indirect_lock, flags);
454 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
455 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
456 spin_unlock_irqrestore(&tp->indirect_lock, flags);
460 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
464 if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
465 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
466 TG3_64BIT_REG_LOW, val);
469 if (off == TG3_RX_STD_PROD_IDX_REG) {
470 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
471 TG3_64BIT_REG_LOW, val);
475 spin_lock_irqsave(&tp->indirect_lock, flags);
476 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
477 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
478 spin_unlock_irqrestore(&tp->indirect_lock, flags);
480 /* In indirect mode when disabling interrupts, we also need
481 * to clear the interrupt bit in the GRC local ctrl register.
483 if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
485 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
486 tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
490 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
495 spin_lock_irqsave(&tp->indirect_lock, flags);
496 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
497 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
498 spin_unlock_irqrestore(&tp->indirect_lock, flags);
502 /* usec_wait specifies the wait time in usec when writing to certain registers
503 * where it is unsafe to read back the register without some delay.
504 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
505 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
507 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
509 if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
510 /* Non-posted methods */
511 tp->write32(tp, off, val);
514 tg3_write32(tp, off, val);
519 /* Wait again after the read for the posted method to guarantee that
520 * the wait time is met.
526 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
528 tp->write32_mbox(tp, off, val);
529 if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
530 tp->read32_mbox(tp, off);
533 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
535 void __iomem *mbox = tp->regs + off;
537 if (tg3_flag(tp, TXD_MBOX_HWBUG))
539 if (tg3_flag(tp, MBOX_WRITE_REORDER))
543 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
545 return readl(tp->regs + off + GRCMBOX_BASE);
548 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
550 writel(val, tp->regs + off + GRCMBOX_BASE);
553 #define tw32_mailbox(reg, val) tp->write32_mbox(tp, reg, val)
554 #define tw32_mailbox_f(reg, val) tw32_mailbox_flush(tp, (reg), (val))
555 #define tw32_rx_mbox(reg, val) tp->write32_rx_mbox(tp, reg, val)
556 #define tw32_tx_mbox(reg, val) tp->write32_tx_mbox(tp, reg, val)
557 #define tr32_mailbox(reg) tp->read32_mbox(tp, reg)
559 #define tw32(reg, val) tp->write32(tp, reg, val)
560 #define tw32_f(reg, val) _tw32_flush(tp, (reg), (val), 0)
561 #define tw32_wait_f(reg, val, us) _tw32_flush(tp, (reg), (val), (us))
562 #define tr32(reg) tp->read32(tp, reg)
564 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
568 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
569 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
572 spin_lock_irqsave(&tp->indirect_lock, flags);
573 if (tg3_flag(tp, SRAM_USE_CONFIG)) {
574 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
575 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
577 /* Always leave this as zero. */
578 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
580 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
581 tw32_f(TG3PCI_MEM_WIN_DATA, val);
583 /* Always leave this as zero. */
584 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
586 spin_unlock_irqrestore(&tp->indirect_lock, flags);
589 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
593 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
594 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
599 spin_lock_irqsave(&tp->indirect_lock, flags);
600 if (tg3_flag(tp, SRAM_USE_CONFIG)) {
601 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
602 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
604 /* Always leave this as zero. */
605 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
607 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
608 *val = tr32(TG3PCI_MEM_WIN_DATA);
610 /* Always leave this as zero. */
611 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
613 spin_unlock_irqrestore(&tp->indirect_lock, flags);
616 static void tg3_ape_lock_init(struct tg3 *tp)
621 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
622 regbase = TG3_APE_LOCK_GRANT;
624 regbase = TG3_APE_PER_LOCK_GRANT;
626 /* Make sure the driver hasn't any stale locks. */
627 for (i = 0; i < 8; i++) {
628 if (i == TG3_APE_LOCK_GPIO)
630 tg3_ape_write32(tp, regbase + 4 * i, APE_LOCK_GRANT_DRIVER);
633 /* Clear the correct bit of the GPIO lock too. */
635 bit = APE_LOCK_GRANT_DRIVER;
637 bit = 1 << tp->pci_fn;
639 tg3_ape_write32(tp, regbase + 4 * TG3_APE_LOCK_GPIO, bit);
642 static int tg3_ape_lock(struct tg3 *tp, int locknum)
646 u32 status, req, gnt, bit;
648 if (!tg3_flag(tp, ENABLE_APE))
652 case TG3_APE_LOCK_GPIO:
653 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
655 case TG3_APE_LOCK_GRC:
656 case TG3_APE_LOCK_MEM:
662 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
663 req = TG3_APE_LOCK_REQ;
664 gnt = TG3_APE_LOCK_GRANT;
666 req = TG3_APE_PER_LOCK_REQ;
667 gnt = TG3_APE_PER_LOCK_GRANT;
672 if (locknum != TG3_APE_LOCK_GPIO || !tp->pci_fn)
673 bit = APE_LOCK_REQ_DRIVER;
675 bit = 1 << tp->pci_fn;
677 tg3_ape_write32(tp, req + off, bit);
679 /* Wait for up to 1 millisecond to acquire lock. */
680 for (i = 0; i < 100; i++) {
681 status = tg3_ape_read32(tp, gnt + off);
688 /* Revoke the lock request. */
689 tg3_ape_write32(tp, gnt + off, bit);
696 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
700 if (!tg3_flag(tp, ENABLE_APE))
704 case TG3_APE_LOCK_GPIO:
705 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
707 case TG3_APE_LOCK_GRC:
708 case TG3_APE_LOCK_MEM:
714 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
715 gnt = TG3_APE_LOCK_GRANT;
717 gnt = TG3_APE_PER_LOCK_GRANT;
719 if (locknum != TG3_APE_LOCK_GPIO || !tp->pci_fn)
720 bit = APE_LOCK_GRANT_DRIVER;
722 bit = 1 << tp->pci_fn;
724 tg3_ape_write32(tp, gnt + 4 * locknum, bit);
727 static void tg3_disable_ints(struct tg3 *tp)
731 tw32(TG3PCI_MISC_HOST_CTRL,
732 (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
733 for (i = 0; i < tp->irq_max; i++)
734 tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
737 static void tg3_enable_ints(struct tg3 *tp)
744 tw32(TG3PCI_MISC_HOST_CTRL,
745 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
747 tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
748 for (i = 0; i < tp->irq_cnt; i++) {
749 struct tg3_napi *tnapi = &tp->napi[i];
751 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
752 if (tg3_flag(tp, 1SHOT_MSI))
753 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
755 tp->coal_now |= tnapi->coal_now;
758 /* Force an initial interrupt */
759 if (!tg3_flag(tp, TAGGED_STATUS) &&
760 (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
761 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
763 tw32(HOSTCC_MODE, tp->coal_now);
765 tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
768 static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
770 struct tg3 *tp = tnapi->tp;
771 struct tg3_hw_status *sblk = tnapi->hw_status;
772 unsigned int work_exists = 0;
774 /* check for phy events */
775 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
776 if (sblk->status & SD_STATUS_LINK_CHG)
779 /* check for RX/TX work to do */
780 if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
781 *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
788 * similar to tg3_enable_ints, but it accurately determines whether there
789 * is new work pending and can return without flushing the PIO write
790 * which reenables interrupts
792 static void tg3_int_reenable(struct tg3_napi *tnapi)
794 struct tg3 *tp = tnapi->tp;
796 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
799 /* When doing tagged status, this work check is unnecessary.
800 * The last_tag we write above tells the chip which piece of
801 * work we've completed.
803 if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
804 tw32(HOSTCC_MODE, tp->coalesce_mode |
805 HOSTCC_MODE_ENABLE | tnapi->coal_now);
808 static void tg3_switch_clocks(struct tg3 *tp)
813 if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
816 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
818 orig_clock_ctrl = clock_ctrl;
819 clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
820 CLOCK_CTRL_CLKRUN_OENABLE |
822 tp->pci_clock_ctrl = clock_ctrl;
824 if (tg3_flag(tp, 5705_PLUS)) {
825 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
826 tw32_wait_f(TG3PCI_CLOCK_CTRL,
827 clock_ctrl | CLOCK_CTRL_625_CORE, 40);
829 } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
830 tw32_wait_f(TG3PCI_CLOCK_CTRL,
832 (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
834 tw32_wait_f(TG3PCI_CLOCK_CTRL,
835 clock_ctrl | (CLOCK_CTRL_ALTCLK),
838 tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
841 #define PHY_BUSY_LOOPS 5000
843 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
849 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
851 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
857 frame_val = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
858 MI_COM_PHY_ADDR_MASK);
859 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
860 MI_COM_REG_ADDR_MASK);
861 frame_val |= (MI_COM_CMD_READ | MI_COM_START);
863 tw32_f(MAC_MI_COM, frame_val);
865 loops = PHY_BUSY_LOOPS;
868 frame_val = tr32(MAC_MI_COM);
870 if ((frame_val & MI_COM_BUSY) == 0) {
872 frame_val = tr32(MAC_MI_COM);
880 *val = frame_val & MI_COM_DATA_MASK;
884 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
885 tw32_f(MAC_MI_MODE, tp->mi_mode);
892 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
898 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
899 (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
902 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
904 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
908 frame_val = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
909 MI_COM_PHY_ADDR_MASK);
910 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
911 MI_COM_REG_ADDR_MASK);
912 frame_val |= (val & MI_COM_DATA_MASK);
913 frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
915 tw32_f(MAC_MI_COM, frame_val);
917 loops = PHY_BUSY_LOOPS;
920 frame_val = tr32(MAC_MI_COM);
921 if ((frame_val & MI_COM_BUSY) == 0) {
923 frame_val = tr32(MAC_MI_COM);
933 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
934 tw32_f(MAC_MI_MODE, tp->mi_mode);
941 static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
945 err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
949 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
953 err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
954 MII_TG3_MMD_CTRL_DATA_NOINC | devad);
958 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
964 static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
968 err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
972 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
976 err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
977 MII_TG3_MMD_CTRL_DATA_NOINC | devad);
981 err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
987 static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
991 err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
993 err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
998 static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1002 err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1004 err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1009 static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
1013 err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
1014 (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
1015 MII_TG3_AUXCTL_SHDWSEL_MISC);
1017 err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
1022 static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
1024 if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
1025 set |= MII_TG3_AUXCTL_MISC_WREN;
1027 return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
1030 #define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
1031 tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
1032 MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
1033 MII_TG3_AUXCTL_ACTL_TX_6DB)
1035 #define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
1036 tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
1037 MII_TG3_AUXCTL_ACTL_TX_6DB);
1039 static int tg3_bmcr_reset(struct tg3 *tp)
1044 /* OK, reset it, and poll the BMCR_RESET bit until it
1045 * clears or we time out.
1047 phy_control = BMCR_RESET;
1048 err = tg3_writephy(tp, MII_BMCR, phy_control);
1054 err = tg3_readphy(tp, MII_BMCR, &phy_control);
1058 if ((phy_control & BMCR_RESET) == 0) {
1070 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
1072 struct tg3 *tp = bp->priv;
1075 spin_lock_bh(&tp->lock);
1077 if (tg3_readphy(tp, reg, &val))
1080 spin_unlock_bh(&tp->lock);
1085 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1087 struct tg3 *tp = bp->priv;
1090 spin_lock_bh(&tp->lock);
1092 if (tg3_writephy(tp, reg, val))
1095 spin_unlock_bh(&tp->lock);
1100 static int tg3_mdio_reset(struct mii_bus *bp)
1105 static void tg3_mdio_config_5785(struct tg3 *tp)
1108 struct phy_device *phydev;
1110 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1111 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1112 case PHY_ID_BCM50610:
1113 case PHY_ID_BCM50610M:
1114 val = MAC_PHYCFG2_50610_LED_MODES;
1116 case PHY_ID_BCMAC131:
1117 val = MAC_PHYCFG2_AC131_LED_MODES;
1119 case PHY_ID_RTL8211C:
1120 val = MAC_PHYCFG2_RTL8211C_LED_MODES;
1122 case PHY_ID_RTL8201E:
1123 val = MAC_PHYCFG2_RTL8201E_LED_MODES;
1129 if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
1130 tw32(MAC_PHYCFG2, val);
1132 val = tr32(MAC_PHYCFG1);
1133 val &= ~(MAC_PHYCFG1_RGMII_INT |
1134 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
1135 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
1136 tw32(MAC_PHYCFG1, val);
1141 if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
1142 val |= MAC_PHYCFG2_EMODE_MASK_MASK |
1143 MAC_PHYCFG2_FMODE_MASK_MASK |
1144 MAC_PHYCFG2_GMODE_MASK_MASK |
1145 MAC_PHYCFG2_ACT_MASK_MASK |
1146 MAC_PHYCFG2_QUAL_MASK_MASK |
1147 MAC_PHYCFG2_INBAND_ENABLE;
1149 tw32(MAC_PHYCFG2, val);
1151 val = tr32(MAC_PHYCFG1);
1152 val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
1153 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
1154 if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1155 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1156 val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
1157 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1158 val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
1160 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
1161 MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
1162 tw32(MAC_PHYCFG1, val);
1164 val = tr32(MAC_EXT_RGMII_MODE);
1165 val &= ~(MAC_RGMII_MODE_RX_INT_B |
1166 MAC_RGMII_MODE_RX_QUALITY |
1167 MAC_RGMII_MODE_RX_ACTIVITY |
1168 MAC_RGMII_MODE_RX_ENG_DET |
1169 MAC_RGMII_MODE_TX_ENABLE |
1170 MAC_RGMII_MODE_TX_LOWPWR |
1171 MAC_RGMII_MODE_TX_RESET);
1172 if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1173 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1174 val |= MAC_RGMII_MODE_RX_INT_B |
1175 MAC_RGMII_MODE_RX_QUALITY |
1176 MAC_RGMII_MODE_RX_ACTIVITY |
1177 MAC_RGMII_MODE_RX_ENG_DET;
1178 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1179 val |= MAC_RGMII_MODE_TX_ENABLE |
1180 MAC_RGMII_MODE_TX_LOWPWR |
1181 MAC_RGMII_MODE_TX_RESET;
1183 tw32(MAC_EXT_RGMII_MODE, val);
1186 static void tg3_mdio_start(struct tg3 *tp)
1188 tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
1189 tw32_f(MAC_MI_MODE, tp->mi_mode);
1192 if (tg3_flag(tp, MDIOBUS_INITED) &&
1193 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
1194 tg3_mdio_config_5785(tp);
1197 static int tg3_mdio_init(struct tg3 *tp)
1201 struct phy_device *phydev;
1203 if (tg3_flag(tp, 5717_PLUS)) {
1206 tp->phy_addr = tp->pci_fn + 1;
1208 if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
1209 is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
1211 is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
1212 TG3_CPMU_PHY_STRAP_IS_SERDES;
1216 tp->phy_addr = TG3_PHY_MII_ADDR;
1220 if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
1223 tp->mdio_bus = mdiobus_alloc();
1224 if (tp->mdio_bus == NULL)
1227 tp->mdio_bus->name = "tg3 mdio bus";
1228 snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
1229 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
1230 tp->mdio_bus->priv = tp;
1231 tp->mdio_bus->parent = &tp->pdev->dev;
1232 tp->mdio_bus->read = &tg3_mdio_read;
1233 tp->mdio_bus->write = &tg3_mdio_write;
1234 tp->mdio_bus->reset = &tg3_mdio_reset;
1235 tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
1236 tp->mdio_bus->irq = &tp->mdio_irq[0];
1238 for (i = 0; i < PHY_MAX_ADDR; i++)
1239 tp->mdio_bus->irq[i] = PHY_POLL;
1241 /* The bus registration will look for all the PHYs on the mdio bus.
1242 * Unfortunately, it does not ensure the PHY is powered up before
1243 * accessing the PHY ID registers. A chip reset is the
1244 * quickest way to bring the device back to an operational state..
1246 if (tg3_readphy(tp, MII_BMCR, ®) || (reg & BMCR_PDOWN))
1249 i = mdiobus_register(tp->mdio_bus);
1251 dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
1252 mdiobus_free(tp->mdio_bus);
1256 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1258 if (!phydev || !phydev->drv) {
1259 dev_warn(&tp->pdev->dev, "No PHY devices\n");
1260 mdiobus_unregister(tp->mdio_bus);
1261 mdiobus_free(tp->mdio_bus);
1265 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1266 case PHY_ID_BCM57780:
1267 phydev->interface = PHY_INTERFACE_MODE_GMII;
1268 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1270 case PHY_ID_BCM50610:
1271 case PHY_ID_BCM50610M:
1272 phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
1273 PHY_BRCM_RX_REFCLK_UNUSED |
1274 PHY_BRCM_DIS_TXCRXC_NOENRGY |
1275 PHY_BRCM_AUTO_PWRDWN_ENABLE;
1276 if (tg3_flag(tp, RGMII_INBAND_DISABLE))
1277 phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
1278 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1279 phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
1280 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1281 phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
1283 case PHY_ID_RTL8211C:
1284 phydev->interface = PHY_INTERFACE_MODE_RGMII;
1286 case PHY_ID_RTL8201E:
1287 case PHY_ID_BCMAC131:
1288 phydev->interface = PHY_INTERFACE_MODE_MII;
1289 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1290 tp->phy_flags |= TG3_PHYFLG_IS_FET;
1294 tg3_flag_set(tp, MDIOBUS_INITED);
1296 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
1297 tg3_mdio_config_5785(tp);
1302 static void tg3_mdio_fini(struct tg3 *tp)
1304 if (tg3_flag(tp, MDIOBUS_INITED)) {
1305 tg3_flag_clear(tp, MDIOBUS_INITED);
1306 mdiobus_unregister(tp->mdio_bus);
1307 mdiobus_free(tp->mdio_bus);
1311 /* tp->lock is held. */
1312 static inline void tg3_generate_fw_event(struct tg3 *tp)
1316 val = tr32(GRC_RX_CPU_EVENT);
1317 val |= GRC_RX_CPU_DRIVER_EVENT;
1318 tw32_f(GRC_RX_CPU_EVENT, val);
1320 tp->last_event_jiffies = jiffies;
1323 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1325 /* tp->lock is held. */
1326 static void tg3_wait_for_event_ack(struct tg3 *tp)
1329 unsigned int delay_cnt;
1332 /* If enough time has passed, no wait is necessary. */
1333 time_remain = (long)(tp->last_event_jiffies + 1 +
1334 usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1336 if (time_remain < 0)
1339 /* Check if we can shorten the wait time. */
1340 delay_cnt = jiffies_to_usecs(time_remain);
1341 if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1342 delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
1343 delay_cnt = (delay_cnt >> 3) + 1;
1345 for (i = 0; i < delay_cnt; i++) {
1346 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1352 /* tp->lock is held. */
1353 static void tg3_ump_link_report(struct tg3 *tp)
1358 if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1361 tg3_wait_for_event_ack(tp);
1363 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1365 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1368 if (!tg3_readphy(tp, MII_BMCR, ®))
1370 if (!tg3_readphy(tp, MII_BMSR, ®))
1371 val |= (reg & 0xffff);
1372 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);
1375 if (!tg3_readphy(tp, MII_ADVERTISE, ®))
1377 if (!tg3_readphy(tp, MII_LPA, ®))
1378 val |= (reg & 0xffff);
1379 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);
1382 if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1383 if (!tg3_readphy(tp, MII_CTRL1000, ®))
1385 if (!tg3_readphy(tp, MII_STAT1000, ®))
1386 val |= (reg & 0xffff);
1388 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);
1390 if (!tg3_readphy(tp, MII_PHYADDR, ®))
1394 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);
1396 tg3_generate_fw_event(tp);
1399 static void tg3_link_report(struct tg3 *tp)
1401 if (!netif_carrier_ok(tp->dev)) {
1402 netif_info(tp, link, tp->dev, "Link is down\n");
1403 tg3_ump_link_report(tp);
1404 } else if (netif_msg_link(tp)) {
1405 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1406 (tp->link_config.active_speed == SPEED_1000 ?
1408 (tp->link_config.active_speed == SPEED_100 ?
1410 (tp->link_config.active_duplex == DUPLEX_FULL ?
1413 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1414 (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1416 (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1419 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1420 netdev_info(tp->dev, "EEE is %s\n",
1421 tp->setlpicnt ? "enabled" : "disabled");
1423 tg3_ump_link_report(tp);
1427 static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
1431 if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1432 miireg = ADVERTISE_PAUSE_CAP;
1433 else if (flow_ctrl & FLOW_CTRL_TX)
1434 miireg = ADVERTISE_PAUSE_ASYM;
1435 else if (flow_ctrl & FLOW_CTRL_RX)
1436 miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1443 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1447 if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1448 miireg = ADVERTISE_1000XPAUSE;
1449 else if (flow_ctrl & FLOW_CTRL_TX)
1450 miireg = ADVERTISE_1000XPSE_ASYM;
1451 else if (flow_ctrl & FLOW_CTRL_RX)
1452 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1459 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1463 if (lcladv & ADVERTISE_1000XPAUSE) {
1464 if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1465 if (rmtadv & LPA_1000XPAUSE)
1466 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1467 else if (rmtadv & LPA_1000XPAUSE_ASYM)
1470 if (rmtadv & LPA_1000XPAUSE)
1471 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1473 } else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1474 if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
1481 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1485 u32 old_rx_mode = tp->rx_mode;
1486 u32 old_tx_mode = tp->tx_mode;
1488 if (tg3_flag(tp, USE_PHYLIB))
1489 autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
1491 autoneg = tp->link_config.autoneg;
1493 if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1494 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1495 flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1497 flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1499 flowctrl = tp->link_config.flowctrl;
1501 tp->link_config.active_flowctrl = flowctrl;
1503 if (flowctrl & FLOW_CTRL_RX)
1504 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1506 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1508 if (old_rx_mode != tp->rx_mode)
1509 tw32_f(MAC_RX_MODE, tp->rx_mode);
1511 if (flowctrl & FLOW_CTRL_TX)
1512 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1514 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1516 if (old_tx_mode != tp->tx_mode)
1517 tw32_f(MAC_TX_MODE, tp->tx_mode);
1520 static void tg3_adjust_link(struct net_device *dev)
1522 u8 oldflowctrl, linkmesg = 0;
1523 u32 mac_mode, lcl_adv, rmt_adv;
1524 struct tg3 *tp = netdev_priv(dev);
1525 struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1527 spin_lock_bh(&tp->lock);
1529 mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1530 MAC_MODE_HALF_DUPLEX);
1532 oldflowctrl = tp->link_config.active_flowctrl;
1538 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
1539 mac_mode |= MAC_MODE_PORT_MODE_MII;
1540 else if (phydev->speed == SPEED_1000 ||
1541 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
1542 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1544 mac_mode |= MAC_MODE_PORT_MODE_MII;
1546 if (phydev->duplex == DUPLEX_HALF)
1547 mac_mode |= MAC_MODE_HALF_DUPLEX;
1549 lcl_adv = tg3_advert_flowctrl_1000T(
1550 tp->link_config.flowctrl);
1553 rmt_adv = LPA_PAUSE_CAP;
1554 if (phydev->asym_pause)
1555 rmt_adv |= LPA_PAUSE_ASYM;
1558 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
1560 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1562 if (mac_mode != tp->mac_mode) {
1563 tp->mac_mode = mac_mode;
1564 tw32_f(MAC_MODE, tp->mac_mode);
1568 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
1569 if (phydev->speed == SPEED_10)
1571 MAC_MI_STAT_10MBPS_MODE |
1572 MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1574 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1577 if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
1578 tw32(MAC_TX_LENGTHS,
1579 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1580 (6 << TX_LENGTHS_IPG_SHIFT) |
1581 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
1583 tw32(MAC_TX_LENGTHS,
1584 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1585 (6 << TX_LENGTHS_IPG_SHIFT) |
1586 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
1588 if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
1589 (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
1590 phydev->speed != tp->link_config.active_speed ||
1591 phydev->duplex != tp->link_config.active_duplex ||
1592 oldflowctrl != tp->link_config.active_flowctrl)
1595 tp->link_config.active_speed = phydev->speed;
1596 tp->link_config.active_duplex = phydev->duplex;
1598 spin_unlock_bh(&tp->lock);
1601 tg3_link_report(tp);
1604 static int tg3_phy_init(struct tg3 *tp)
1606 struct phy_device *phydev;
1608 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
1611 /* Bring the PHY back to a known state. */
1614 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1616 /* Attach the MAC to the PHY. */
1617 phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
1618 phydev->dev_flags, phydev->interface);
1619 if (IS_ERR(phydev)) {
1620 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
1621 return PTR_ERR(phydev);
1624 /* Mask with MAC supported features. */
1625 switch (phydev->interface) {
1626 case PHY_INTERFACE_MODE_GMII:
1627 case PHY_INTERFACE_MODE_RGMII:
1628 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
1629 phydev->supported &= (PHY_GBIT_FEATURES |
1631 SUPPORTED_Asym_Pause);
1635 case PHY_INTERFACE_MODE_MII:
1636 phydev->supported &= (PHY_BASIC_FEATURES |
1638 SUPPORTED_Asym_Pause);
1641 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1645 tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
1647 phydev->advertising = phydev->supported;
1652 static void tg3_phy_start(struct tg3 *tp)
1654 struct phy_device *phydev;
1656 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1659 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1661 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
1662 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
1663 phydev->speed = tp->link_config.orig_speed;
1664 phydev->duplex = tp->link_config.orig_duplex;
1665 phydev->autoneg = tp->link_config.orig_autoneg;
1666 phydev->advertising = tp->link_config.orig_advertising;
1671 phy_start_aneg(phydev);
1674 static void tg3_phy_stop(struct tg3 *tp)
1676 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1679 phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1682 static void tg3_phy_fini(struct tg3 *tp)
1684 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
1685 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1686 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
1690 static int tg3_phy_set_extloopbk(struct tg3 *tp)
1695 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
1698 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
1699 /* Cannot do read-modify-write on 5401 */
1700 err = tg3_phy_auxctl_write(tp,
1701 MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
1702 MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
1707 err = tg3_phy_auxctl_read(tp,
1708 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
1712 val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
1713 err = tg3_phy_auxctl_write(tp,
1714 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
1720 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
1724 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
1727 tg3_writephy(tp, MII_TG3_FET_TEST,
1728 phytest | MII_TG3_FET_SHADOW_EN);
1729 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
1731 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
1733 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
1734 tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
1736 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
1740 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
1744 if (!tg3_flag(tp, 5705_PLUS) ||
1745 (tg3_flag(tp, 5717_PLUS) &&
1746 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
1749 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
1750 tg3_phy_fet_toggle_apd(tp, enable);
1754 reg = MII_TG3_MISC_SHDW_WREN |
1755 MII_TG3_MISC_SHDW_SCR5_SEL |
1756 MII_TG3_MISC_SHDW_SCR5_LPED |
1757 MII_TG3_MISC_SHDW_SCR5_DLPTLM |
1758 MII_TG3_MISC_SHDW_SCR5_SDTL |
1759 MII_TG3_MISC_SHDW_SCR5_C125OE;
1760 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
1761 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
1763 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
1766 reg = MII_TG3_MISC_SHDW_WREN |
1767 MII_TG3_MISC_SHDW_APD_SEL |
1768 MII_TG3_MISC_SHDW_APD_WKTM_84MS;
1770 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
1772 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
1775 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
1779 if (!tg3_flag(tp, 5705_PLUS) ||
1780 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
1783 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
1786 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
1787 u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
1789 tg3_writephy(tp, MII_TG3_FET_TEST,
1790 ephy | MII_TG3_FET_SHADOW_EN);
1791 if (!tg3_readphy(tp, reg, &phy)) {
1793 phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
1795 phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
1796 tg3_writephy(tp, reg, phy);
1798 tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
1803 ret = tg3_phy_auxctl_read(tp,
1804 MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
1807 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
1809 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
1810 tg3_phy_auxctl_write(tp,
1811 MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
1816 static void tg3_phy_set_wirespeed(struct tg3 *tp)
1821 if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
1824 ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
1826 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
1827 val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
1830 static void tg3_phy_apply_otp(struct tg3 *tp)
1839 if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
1842 phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
1843 phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
1844 tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
1846 phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
1847 ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
1848 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
1850 phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
1851 phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
1852 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
1854 phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
1855 tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
1857 phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
1858 tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
1860 phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
1861 ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
1862 tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
1864 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
1867 static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
1871 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
1876 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
1877 current_link_up == 1 &&
1878 tp->link_config.active_duplex == DUPLEX_FULL &&
1879 (tp->link_config.active_speed == SPEED_100 ||
1880 tp->link_config.active_speed == SPEED_1000)) {
1883 if (tp->link_config.active_speed == SPEED_1000)
1884 eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
1886 eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
1888 tw32(TG3_CPMU_EEE_CTRL, eeectl);
1890 tg3_phy_cl45_read(tp, MDIO_MMD_AN,
1891 TG3_CL45_D7_EEERES_STAT, &val);
1893 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
1894 val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
1898 if (!tp->setlpicnt) {
1899 if (current_link_up == 1 &&
1900 !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
1901 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
1902 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
1905 val = tr32(TG3_CPMU_EEE_MODE);
1906 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
1910 static void tg3_phy_eee_enable(struct tg3 *tp)
1914 if (tp->link_config.active_speed == SPEED_1000 &&
1915 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
1916 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
1917 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) &&
1918 !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
1919 val = MII_TG3_DSP_TAP26_ALNOKO |
1920 MII_TG3_DSP_TAP26_RMRXSTO;
1921 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
1922 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
1925 val = tr32(TG3_CPMU_EEE_MODE);
1926 tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
1929 static int tg3_wait_macro_done(struct tg3 *tp)
1936 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
1937 if ((tmp32 & 0x1000) == 0)
1947 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
1949 static const u32 test_pat[4][6] = {
1950 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
1951 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
1952 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
1953 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
1957 for (chan = 0; chan < 4; chan++) {
1960 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1961 (chan * 0x2000) | 0x0200);
1962 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
1964 for (i = 0; i < 6; i++)
1965 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
1968 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
1969 if (tg3_wait_macro_done(tp)) {
1974 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1975 (chan * 0x2000) | 0x0200);
1976 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
1977 if (tg3_wait_macro_done(tp)) {
1982 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
1983 if (tg3_wait_macro_done(tp)) {
1988 for (i = 0; i < 6; i += 2) {
1991 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
1992 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
1993 tg3_wait_macro_done(tp)) {
1999 if (low != test_pat[chan][i] ||
2000 high != test_pat[chan][i+1]) {
2001 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2002 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2003 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2013 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2017 for (chan = 0; chan < 4; chan++) {
2020 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2021 (chan * 0x2000) | 0x0200);
2022 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2023 for (i = 0; i < 6; i++)
2024 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2025 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2026 if (tg3_wait_macro_done(tp))
2033 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2035 u32 reg32, phy9_orig;
2036 int retries, do_phy_reset, err;
2042 err = tg3_bmcr_reset(tp);
2048 /* Disable transmitter and interrupt. */
2049 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, ®32))
2053 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2055 /* Set full-duplex, 1000 mbps. */
2056 tg3_writephy(tp, MII_BMCR,
2057 BMCR_FULLDPLX | BMCR_SPEED1000);
2059 /* Set to master mode. */
2060 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2063 tg3_writephy(tp, MII_CTRL1000,
2064 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2066 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
2070 /* Block the PHY control access. */
2071 tg3_phydsp_write(tp, 0x8005, 0x0800);
2073 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2076 } while (--retries);
2078 err = tg3_phy_reset_chanpat(tp);
2082 tg3_phydsp_write(tp, 0x8005, 0x0000);
2084 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2085 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2087 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2089 tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2091 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, ®32)) {
2093 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2100 /* This will reset the tigon3 PHY if there is no valid
2101 * link unless the FORCE argument is non-zero.
2103 static int tg3_phy_reset(struct tg3 *tp)
2108 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2109 val = tr32(GRC_MISC_CFG);
2110 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2113 err = tg3_readphy(tp, MII_BMSR, &val);
2114 err |= tg3_readphy(tp, MII_BMSR, &val);
2118 if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
2119 netif_carrier_off(tp->dev);
2120 tg3_link_report(tp);
2123 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2124 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2125 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
2126 err = tg3_phy_reset_5703_4_5(tp);
2133 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
2134 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
2135 cpmuctrl = tr32(TG3_CPMU_CTRL);
2136 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2138 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2141 err = tg3_bmcr_reset(tp);
2145 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2146 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2147 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2149 tw32(TG3_CPMU_CTRL, cpmuctrl);
2152 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2153 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2154 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2155 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2156 CPMU_LSPD_1000MB_MACCLK_12_5) {
2157 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2159 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2163 if (tg3_flag(tp, 5717_PLUS) &&
2164 (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2167 tg3_phy_apply_otp(tp);
2169 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2170 tg3_phy_toggle_apd(tp, true);
2172 tg3_phy_toggle_apd(tp, false);
2175 if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2176 !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2177 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2178 tg3_phydsp_write(tp, 0x000a, 0x0323);
2179 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2182 if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2183 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2184 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2187 if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2188 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2189 tg3_phydsp_write(tp, 0x000a, 0x310b);
2190 tg3_phydsp_write(tp, 0x201f, 0x9506);
2191 tg3_phydsp_write(tp, 0x401f, 0x14e2);
2192 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2194 } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2195 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2196 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2197 if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2198 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2199 tg3_writephy(tp, MII_TG3_TEST1,
2200 MII_TG3_TEST1_TRIM_EN | 0x4);
2202 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2204 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2208 /* Set Extended packet length bit (bit 14) on all chips that */
2209 /* support jumbo frames */
2210 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2211 /* Cannot do read-modify-write on 5401 */
2212 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2213 } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2214 /* Set bit 14 with read-modify-write to preserve other bits */
2215 err = tg3_phy_auxctl_read(tp,
2216 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2218 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2219 val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2222 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2223 * jumbo frames transmission.
2225 if (tg3_flag(tp, JUMBO_CAPABLE)) {
2226 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2227 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2228 val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2231 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2232 /* adjust output voltage */
2233 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2236 tg3_phy_toggle_automdix(tp, 1);
2237 tg3_phy_set_wirespeed(tp);
2241 #define TG3_GPIO_MSG_DRVR_PRES 0x00000001
2242 #define TG3_GPIO_MSG_NEED_VAUX 0x00000002
2243 #define TG3_GPIO_MSG_MASK (TG3_GPIO_MSG_DRVR_PRES | \
2244 TG3_GPIO_MSG_NEED_VAUX)
2245 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2246 ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2247 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2248 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2249 (TG3_GPIO_MSG_DRVR_PRES << 12))
2251 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2252 ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2253 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2254 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2255 (TG3_GPIO_MSG_NEED_VAUX << 12))
2257 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2261 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2262 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2263 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2265 status = tr32(TG3_CPMU_DRV_STATUS);
2267 shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2268 status &= ~(TG3_GPIO_MSG_MASK << shift);
2269 status |= (newstat << shift);
2271 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2272 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2273 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2275 tw32(TG3_CPMU_DRV_STATUS, status);
2277 return status >> TG3_APE_GPIO_MSG_SHIFT;
2280 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2282 if (!tg3_flag(tp, IS_NIC))
2285 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2286 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2287 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2288 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2291 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2293 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2294 TG3_GRC_LCLCTL_PWRSW_DELAY);
2296 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2298 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2299 TG3_GRC_LCLCTL_PWRSW_DELAY);
2305 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2309 if (!tg3_flag(tp, IS_NIC) ||
2310 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2311 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
2314 grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2316 tw32_wait_f(GRC_LOCAL_CTRL,
2317 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2318 TG3_GRC_LCLCTL_PWRSW_DELAY);
2320 tw32_wait_f(GRC_LOCAL_CTRL,
2322 TG3_GRC_LCLCTL_PWRSW_DELAY);
2324 tw32_wait_f(GRC_LOCAL_CTRL,
2325 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2326 TG3_GRC_LCLCTL_PWRSW_DELAY);
2329 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2331 if (!tg3_flag(tp, IS_NIC))
2334 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2335 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2336 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2337 (GRC_LCLCTRL_GPIO_OE0 |
2338 GRC_LCLCTRL_GPIO_OE1 |
2339 GRC_LCLCTRL_GPIO_OE2 |
2340 GRC_LCLCTRL_GPIO_OUTPUT0 |
2341 GRC_LCLCTRL_GPIO_OUTPUT1),
2342 TG3_GRC_LCLCTL_PWRSW_DELAY);
2343 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2344 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2345 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2346 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2347 GRC_LCLCTRL_GPIO_OE1 |
2348 GRC_LCLCTRL_GPIO_OE2 |
2349 GRC_LCLCTRL_GPIO_OUTPUT0 |
2350 GRC_LCLCTRL_GPIO_OUTPUT1 |
2352 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2353 TG3_GRC_LCLCTL_PWRSW_DELAY);
2355 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2356 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2357 TG3_GRC_LCLCTL_PWRSW_DELAY);
2359 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2360 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2361 TG3_GRC_LCLCTL_PWRSW_DELAY);
2364 u32 grc_local_ctrl = 0;
2366 /* Workaround to prevent overdrawing Amps. */
2367 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2368 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2369 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2371 TG3_GRC_LCLCTL_PWRSW_DELAY);
2374 /* On 5753 and variants, GPIO2 cannot be used. */
2375 no_gpio2 = tp->nic_sram_data_cfg &
2376 NIC_SRAM_DATA_CFG_NO_GPIO2;
2378 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2379 GRC_LCLCTRL_GPIO_OE1 |
2380 GRC_LCLCTRL_GPIO_OE2 |
2381 GRC_LCLCTRL_GPIO_OUTPUT1 |
2382 GRC_LCLCTRL_GPIO_OUTPUT2;
2384 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2385 GRC_LCLCTRL_GPIO_OUTPUT2);
2387 tw32_wait_f(GRC_LOCAL_CTRL,
2388 tp->grc_local_ctrl | grc_local_ctrl,
2389 TG3_GRC_LCLCTL_PWRSW_DELAY);
2391 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2393 tw32_wait_f(GRC_LOCAL_CTRL,
2394 tp->grc_local_ctrl | grc_local_ctrl,
2395 TG3_GRC_LCLCTL_PWRSW_DELAY);
2398 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2399 tw32_wait_f(GRC_LOCAL_CTRL,
2400 tp->grc_local_ctrl | grc_local_ctrl,
2401 TG3_GRC_LCLCTL_PWRSW_DELAY);
2406 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2410 /* Serialize power state transitions */
2411 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2414 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2415 msg = TG3_GPIO_MSG_NEED_VAUX;
2417 msg = tg3_set_function_status(tp, msg);
2419 if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2422 if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2423 tg3_pwrsrc_switch_to_vaux(tp);
2425 tg3_pwrsrc_die_with_vmain(tp);
2428 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2431 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2433 bool need_vaux = false;
2435 /* The GPIOs do something completely different on 57765. */
2436 if (!tg3_flag(tp, IS_NIC) ||
2437 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
2440 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2441 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2442 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2443 tg3_frob_aux_power_5717(tp, include_wol ?
2444 tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2448 if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2449 struct net_device *dev_peer;
2451 dev_peer = pci_get_drvdata(tp->pdev_peer);
2453 /* remove_one() may have been run on the peer. */
2455 struct tg3 *tp_peer = netdev_priv(dev_peer);
2457 if (tg3_flag(tp_peer, INIT_COMPLETE))
2460 if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2461 tg3_flag(tp_peer, ENABLE_ASF))
2466 if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2467 tg3_flag(tp, ENABLE_ASF))
2471 tg3_pwrsrc_switch_to_vaux(tp);
2473 tg3_pwrsrc_die_with_vmain(tp);
2476 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2478 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2480 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2481 if (speed != SPEED_10)
2483 } else if (speed == SPEED_10)
2489 static int tg3_setup_phy(struct tg3 *, int);
2491 #define RESET_KIND_SHUTDOWN 0
2492 #define RESET_KIND_INIT 1
2493 #define RESET_KIND_SUSPEND 2
2495 static void tg3_write_sig_post_reset(struct tg3 *, int);
2496 static int tg3_halt_cpu(struct tg3 *, u32);
2498 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2502 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2503 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2504 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2505 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2508 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2509 tw32(SG_DIG_CTRL, sg_dig_ctrl);
2510 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2515 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2517 val = tr32(GRC_MISC_CFG);
2518 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2521 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2523 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2526 tg3_writephy(tp, MII_ADVERTISE, 0);
2527 tg3_writephy(tp, MII_BMCR,
2528 BMCR_ANENABLE | BMCR_ANRESTART);
2530 tg3_writephy(tp, MII_TG3_FET_TEST,
2531 phytest | MII_TG3_FET_SHADOW_EN);
2532 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2533 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2535 MII_TG3_FET_SHDW_AUXMODE4,
2538 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2541 } else if (do_low_power) {
2542 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2543 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2545 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2546 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2547 MII_TG3_AUXCTL_PCTL_VREG_11V;
2548 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
2551 /* The PHY should not be powered down on some chips because
2554 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2555 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2556 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2557 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2560 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2561 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2562 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2563 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2564 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2565 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2568 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2571 /* tp->lock is held. */
2572 static int tg3_nvram_lock(struct tg3 *tp)
2574 if (tg3_flag(tp, NVRAM)) {
2577 if (tp->nvram_lock_cnt == 0) {
2578 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2579 for (i = 0; i < 8000; i++) {
2580 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2585 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2589 tp->nvram_lock_cnt++;
2594 /* tp->lock is held. */
2595 static void tg3_nvram_unlock(struct tg3 *tp)
2597 if (tg3_flag(tp, NVRAM)) {
2598 if (tp->nvram_lock_cnt > 0)
2599 tp->nvram_lock_cnt--;
2600 if (tp->nvram_lock_cnt == 0)
2601 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2605 /* tp->lock is held. */
2606 static void tg3_enable_nvram_access(struct tg3 *tp)
2608 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2609 u32 nvaccess = tr32(NVRAM_ACCESS);
2611 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2615 /* tp->lock is held. */
2616 static void tg3_disable_nvram_access(struct tg3 *tp)
2618 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2619 u32 nvaccess = tr32(NVRAM_ACCESS);
2621 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
2625 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
2626 u32 offset, u32 *val)
2631 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
2634 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
2635 EEPROM_ADDR_DEVID_MASK |
2637 tw32(GRC_EEPROM_ADDR,
2639 (0 << EEPROM_ADDR_DEVID_SHIFT) |
2640 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
2641 EEPROM_ADDR_ADDR_MASK) |
2642 EEPROM_ADDR_READ | EEPROM_ADDR_START);
2644 for (i = 0; i < 1000; i++) {
2645 tmp = tr32(GRC_EEPROM_ADDR);
2647 if (tmp & EEPROM_ADDR_COMPLETE)
2651 if (!(tmp & EEPROM_ADDR_COMPLETE))
2654 tmp = tr32(GRC_EEPROM_DATA);
2657 * The data will always be opposite the native endian
2658 * format. Perform a blind byteswap to compensate.
2665 #define NVRAM_CMD_TIMEOUT 10000
2667 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
2671 tw32(NVRAM_CMD, nvram_cmd);
2672 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
2674 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
2680 if (i == NVRAM_CMD_TIMEOUT)
2686 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
2688 if (tg3_flag(tp, NVRAM) &&
2689 tg3_flag(tp, NVRAM_BUFFERED) &&
2690 tg3_flag(tp, FLASH) &&
2691 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2692 (tp->nvram_jedecnum == JEDEC_ATMEL))
2694 addr = ((addr / tp->nvram_pagesize) <<
2695 ATMEL_AT45DB0X1B_PAGE_POS) +
2696 (addr % tp->nvram_pagesize);
2701 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
2703 if (tg3_flag(tp, NVRAM) &&
2704 tg3_flag(tp, NVRAM_BUFFERED) &&
2705 tg3_flag(tp, FLASH) &&
2706 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2707 (tp->nvram_jedecnum == JEDEC_ATMEL))
2709 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
2710 tp->nvram_pagesize) +
2711 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
2716 /* NOTE: Data read in from NVRAM is byteswapped according to
2717 * the byteswapping settings for all other register accesses.
2718 * tg3 devices are BE devices, so on a BE machine, the data
2719 * returned will be exactly as it is seen in NVRAM. On a LE
2720 * machine, the 32-bit value will be byteswapped.
2722 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
2726 if (!tg3_flag(tp, NVRAM))
2727 return tg3_nvram_read_using_eeprom(tp, offset, val);
2729 offset = tg3_nvram_phys_addr(tp, offset);
2731 if (offset > NVRAM_ADDR_MSK)
2734 ret = tg3_nvram_lock(tp);
2738 tg3_enable_nvram_access(tp);
2740 tw32(NVRAM_ADDR, offset);
2741 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
2742 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
2745 *val = tr32(NVRAM_RDDATA);
2747 tg3_disable_nvram_access(tp);
2749 tg3_nvram_unlock(tp);
2754 /* Ensures NVRAM data is in bytestream format. */
2755 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
2758 int res = tg3_nvram_read(tp, offset, &v);
2760 *val = cpu_to_be32(v);
2764 /* tp->lock is held. */
2765 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
2767 u32 addr_high, addr_low;
2770 addr_high = ((tp->dev->dev_addr[0] << 8) |
2771 tp->dev->dev_addr[1]);
2772 addr_low = ((tp->dev->dev_addr[2] << 24) |
2773 (tp->dev->dev_addr[3] << 16) |
2774 (tp->dev->dev_addr[4] << 8) |
2775 (tp->dev->dev_addr[5] << 0));
2776 for (i = 0; i < 4; i++) {
2777 if (i == 1 && skip_mac_1)
2779 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
2780 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
2783 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2784 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2785 for (i = 0; i < 12; i++) {
2786 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
2787 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
2791 addr_high = (tp->dev->dev_addr[0] +
2792 tp->dev->dev_addr[1] +
2793 tp->dev->dev_addr[2] +
2794 tp->dev->dev_addr[3] +
2795 tp->dev->dev_addr[4] +
2796 tp->dev->dev_addr[5]) &
2797 TX_BACKOFF_SEED_MASK;
2798 tw32(MAC_TX_BACKOFF_SEED, addr_high);
2801 static void tg3_enable_register_access(struct tg3 *tp)
2804 * Make sure register accesses (indirect or otherwise) will function
2807 pci_write_config_dword(tp->pdev,
2808 TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
2811 static int tg3_power_up(struct tg3 *tp)
2815 tg3_enable_register_access(tp);
2817 err = pci_set_power_state(tp->pdev, PCI_D0);
2819 /* Switch out of Vaux if it is a NIC */
2820 tg3_pwrsrc_switch_to_vmain(tp);
2822 netdev_err(tp->dev, "Transition to D0 failed\n");
2828 static int tg3_power_down_prepare(struct tg3 *tp)
2831 bool device_should_wake, do_low_power;
2833 tg3_enable_register_access(tp);
2835 /* Restore the CLKREQ setting. */
2836 if (tg3_flag(tp, CLKREQ_BUG)) {
2839 pci_read_config_word(tp->pdev,
2840 pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
2842 lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
2843 pci_write_config_word(tp->pdev,
2844 pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
2848 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
2849 tw32(TG3PCI_MISC_HOST_CTRL,
2850 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
2852 device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
2853 tg3_flag(tp, WOL_ENABLE);
2855 if (tg3_flag(tp, USE_PHYLIB)) {
2856 do_low_power = false;
2857 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
2858 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
2859 struct phy_device *phydev;
2860 u32 phyid, advertising;
2862 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2864 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
2866 tp->link_config.orig_speed = phydev->speed;
2867 tp->link_config.orig_duplex = phydev->duplex;
2868 tp->link_config.orig_autoneg = phydev->autoneg;
2869 tp->link_config.orig_advertising = phydev->advertising;
2871 advertising = ADVERTISED_TP |
2873 ADVERTISED_Autoneg |
2874 ADVERTISED_10baseT_Half;
2876 if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
2877 if (tg3_flag(tp, WOL_SPEED_100MB))
2879 ADVERTISED_100baseT_Half |
2880 ADVERTISED_100baseT_Full |
2881 ADVERTISED_10baseT_Full;
2883 advertising |= ADVERTISED_10baseT_Full;
2886 phydev->advertising = advertising;
2888 phy_start_aneg(phydev);
2890 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
2891 if (phyid != PHY_ID_BCMAC131) {
2892 phyid &= PHY_BCM_OUI_MASK;
2893 if (phyid == PHY_BCM_OUI_1 ||
2894 phyid == PHY_BCM_OUI_2 ||
2895 phyid == PHY_BCM_OUI_3)
2896 do_low_power = true;
2900 do_low_power = true;
2902 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
2903 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
2904 tp->link_config.orig_speed = tp->link_config.speed;
2905 tp->link_config.orig_duplex = tp->link_config.duplex;
2906 tp->link_config.orig_autoneg = tp->link_config.autoneg;
2909 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
2910 tp->link_config.speed = SPEED_10;
2911 tp->link_config.duplex = DUPLEX_HALF;
2912 tp->link_config.autoneg = AUTONEG_ENABLE;
2913 tg3_setup_phy(tp, 0);
2917 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2920 val = tr32(GRC_VCPU_EXT_CTRL);
2921 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
2922 } else if (!tg3_flag(tp, ENABLE_ASF)) {
2926 for (i = 0; i < 200; i++) {
2927 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
2928 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
2933 if (tg3_flag(tp, WOL_CAP))
2934 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
2935 WOL_DRV_STATE_SHUTDOWN |
2939 if (device_should_wake) {
2942 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
2944 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
2945 tg3_phy_auxctl_write(tp,
2946 MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
2947 MII_TG3_AUXCTL_PCTL_WOL_EN |
2948 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2949 MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
2953 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
2954 mac_mode = MAC_MODE_PORT_MODE_GMII;
2956 mac_mode = MAC_MODE_PORT_MODE_MII;
2958 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
2959 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2961 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
2962 SPEED_100 : SPEED_10;
2963 if (tg3_5700_link_polarity(tp, speed))
2964 mac_mode |= MAC_MODE_LINK_POLARITY;
2966 mac_mode &= ~MAC_MODE_LINK_POLARITY;
2969 mac_mode = MAC_MODE_PORT_MODE_TBI;
2972 if (!tg3_flag(tp, 5750_PLUS))
2973 tw32(MAC_LED_CTRL, tp->led_ctrl);
2975 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
2976 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
2977 (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
2978 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
2980 if (tg3_flag(tp, ENABLE_APE))
2981 mac_mode |= MAC_MODE_APE_TX_EN |
2982 MAC_MODE_APE_RX_EN |
2983 MAC_MODE_TDE_ENABLE;
2985 tw32_f(MAC_MODE, mac_mode);
2988 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
2992 if (!tg3_flag(tp, WOL_SPEED_100MB) &&
2993 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2994 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
2997 base_val = tp->pci_clock_ctrl;
2998 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
2999 CLOCK_CTRL_TXCLK_DISABLE);
3001 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
3002 CLOCK_CTRL_PWRDOWN_PLL133, 40);
3003 } else if (tg3_flag(tp, 5780_CLASS) ||
3004 tg3_flag(tp, CPMU_PRESENT) ||
3005 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3007 } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
3008 u32 newbits1, newbits2;
3010 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3011 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3012 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
3013 CLOCK_CTRL_TXCLK_DISABLE |
3015 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3016 } else if (tg3_flag(tp, 5705_PLUS)) {
3017 newbits1 = CLOCK_CTRL_625_CORE;
3018 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
3020 newbits1 = CLOCK_CTRL_ALTCLK;
3021 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3024 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
3027 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
3030 if (!tg3_flag(tp, 5705_PLUS)) {
3033 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3034 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3035 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
3036 CLOCK_CTRL_TXCLK_DISABLE |
3037 CLOCK_CTRL_44MHZ_CORE);
3039 newbits3 = CLOCK_CTRL_44MHZ_CORE;
3042 tw32_wait_f(TG3PCI_CLOCK_CTRL,
3043 tp->pci_clock_ctrl | newbits3, 40);
3047 if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
3048 tg3_power_down_phy(tp, do_low_power);
3050 tg3_frob_aux_power(tp, true);
3052 /* Workaround for unstable PLL clock */
3053 if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
3054 (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
3055 u32 val = tr32(0x7d00);
3057 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
3059 if (!tg3_flag(tp, ENABLE_ASF)) {
3062 err = tg3_nvram_lock(tp);
3063 tg3_halt_cpu(tp, RX_CPU_BASE);
3065 tg3_nvram_unlock(tp);
3069 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
3074 static void tg3_power_down(struct tg3 *tp)
3076 tg3_power_down_prepare(tp);
3078 pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
3079 pci_set_power_state(tp->pdev, PCI_D3hot);
3082 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
3084 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
3085 case MII_TG3_AUX_STAT_10HALF:
3087 *duplex = DUPLEX_HALF;
3090 case MII_TG3_AUX_STAT_10FULL:
3092 *duplex = DUPLEX_FULL;
3095 case MII_TG3_AUX_STAT_100HALF:
3097 *duplex = DUPLEX_HALF;
3100 case MII_TG3_AUX_STAT_100FULL:
3102 *duplex = DUPLEX_FULL;
3105 case MII_TG3_AUX_STAT_1000HALF:
3106 *speed = SPEED_1000;
3107 *duplex = DUPLEX_HALF;
3110 case MII_TG3_AUX_STAT_1000FULL:
3111 *speed = SPEED_1000;
3112 *duplex = DUPLEX_FULL;
3116 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3117 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
3119 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
3123 *speed = SPEED_INVALID;
3124 *duplex = DUPLEX_INVALID;
3129 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
3134 new_adv = ADVERTISE_CSMA;
3135 if (advertise & ADVERTISED_10baseT_Half)
3136 new_adv |= ADVERTISE_10HALF;
3137 if (advertise & ADVERTISED_10baseT_Full)
3138 new_adv |= ADVERTISE_10FULL;
3139 if (advertise & ADVERTISED_100baseT_Half)
3140 new_adv |= ADVERTISE_100HALF;
3141 if (advertise & ADVERTISED_100baseT_Full)
3142 new_adv |= ADVERTISE_100FULL;
3144 new_adv |= tg3_advert_flowctrl_1000T(flowctrl);
3146 err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
3150 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
3154 if (advertise & ADVERTISED_1000baseT_Half)
3155 new_adv |= ADVERTISE_1000HALF;
3156 if (advertise & ADVERTISED_1000baseT_Full)
3157 new_adv |= ADVERTISE_1000FULL;
3159 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3160 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
3161 new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
3163 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
3167 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
3170 tw32(TG3_CPMU_EEE_MODE,
3171 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
3173 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
3178 /* Advertise 100-BaseTX EEE ability */
3179 if (advertise & ADVERTISED_100baseT_Full)
3180 val |= MDIO_AN_EEE_ADV_100TX;
3181 /* Advertise 1000-BaseT EEE ability */
3182 if (advertise & ADVERTISED_1000baseT_Full)
3183 val |= MDIO_AN_EEE_ADV_1000T;
3184 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
3188 switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
3190 case ASIC_REV_57765:
3192 /* If we advertised any eee advertisements above... */
3194 val = MII_TG3_DSP_TAP26_ALNOKO |
3195 MII_TG3_DSP_TAP26_RMRXSTO |
3196 MII_TG3_DSP_TAP26_OPCSINPT;
3197 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
3200 if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
3201 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
3202 MII_TG3_DSP_CH34TP2_HIBW01);
3205 err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
3214 static void tg3_phy_copper_begin(struct tg3 *tp)
3219 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
3220 new_adv = ADVERTISED_10baseT_Half |
3221 ADVERTISED_10baseT_Full;
3222 if (tg3_flag(tp, WOL_SPEED_100MB))
3223 new_adv |= ADVERTISED_100baseT_Half |
3224 ADVERTISED_100baseT_Full;
3226 tg3_phy_autoneg_cfg(tp, new_adv,
3227 FLOW_CTRL_TX | FLOW_CTRL_RX);
3228 } else if (tp->link_config.speed == SPEED_INVALID) {
3229 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
3230 tp->link_config.advertising &=
3231 ~(ADVERTISED_1000baseT_Half |
3232 ADVERTISED_1000baseT_Full);
3234 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
3235 tp->link_config.flowctrl);
3237 /* Asking for a specific link mode. */
3238 if (tp->link_config.speed == SPEED_1000) {
3239 if (tp->link_config.duplex == DUPLEX_FULL)
3240 new_adv = ADVERTISED_1000baseT_Full;
3242 new_adv = ADVERTISED_1000baseT_Half;
3243 } else if (tp->link_config.speed == SPEED_100) {
3244 if (tp->link_config.duplex == DUPLEX_FULL)
3245 new_adv = ADVERTISED_100baseT_Full;
3247 new_adv = ADVERTISED_100baseT_Half;
3249 if (tp->link_config.duplex == DUPLEX_FULL)
3250 new_adv = ADVERTISED_10baseT_Full;
3252 new_adv = ADVERTISED_10baseT_Half;
3255 tg3_phy_autoneg_cfg(tp, new_adv,
3256 tp->link_config.flowctrl);
3259 if (tp->link_config.autoneg == AUTONEG_DISABLE &&
3260 tp->link_config.speed != SPEED_INVALID) {
3261 u32 bmcr, orig_bmcr;
3263 tp->link_config.active_speed = tp->link_config.speed;
3264 tp->link_config.active_duplex = tp->link_config.duplex;
3267 switch (tp->link_config.speed) {
3273 bmcr |= BMCR_SPEED100;
3277 bmcr |= BMCR_SPEED1000;
3281 if (tp->link_config.duplex == DUPLEX_FULL)
3282 bmcr |= BMCR_FULLDPLX;
3284 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
3285 (bmcr != orig_bmcr)) {
3286 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
3287 for (i = 0; i < 1500; i++) {
3291 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
3292 tg3_readphy(tp, MII_BMSR, &tmp))
3294 if (!(tmp & BMSR_LSTATUS)) {
3299 tg3_writephy(tp, MII_BMCR, bmcr);
3303 tg3_writephy(tp, MII_BMCR,
3304 BMCR_ANENABLE | BMCR_ANRESTART);
3308 static int tg3_init_5401phy_dsp(struct tg3 *tp)
3312 /* Turn off tap power management. */
3313 /* Set Extended packet length bit */
3314 err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
3316 err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
3317 err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
3318 err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
3319 err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
3320 err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
3327 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
3329 u32 adv_reg, all_mask = 0;
3331 if (mask & ADVERTISED_10baseT_Half)
3332 all_mask |= ADVERTISE_10HALF;
3333 if (mask & ADVERTISED_10baseT_Full)
3334 all_mask |= ADVERTISE_10FULL;
3335 if (mask & ADVERTISED_100baseT_Half)
3336 all_mask |= ADVERTISE_100HALF;
3337 if (mask & ADVERTISED_100baseT_Full)
3338 all_mask |= ADVERTISE_100FULL;
3340 if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
3343 if ((adv_reg & ADVERTISE_ALL) != all_mask)
3346 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3350 if (mask & ADVERTISED_1000baseT_Half)
3351 all_mask |= ADVERTISE_1000HALF;
3352 if (mask & ADVERTISED_1000baseT_Full)
3353 all_mask |= ADVERTISE_1000FULL;
3355 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
3358 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
3359 if (tg3_ctrl != all_mask)
3365 static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
3369 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
3372 curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
3373 reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
3375 if (tp->link_config.active_duplex == DUPLEX_FULL) {
3376 if (curadv != reqadv)
3379 if (tg3_flag(tp, PAUSE_AUTONEG))
3380 tg3_readphy(tp, MII_LPA, rmtadv);
3382 /* Reprogram the advertisement register, even if it
3383 * does not affect the current link. If the link
3384 * gets renegotiated in the future, we can save an
3385 * additional renegotiation cycle by advertising
3386 * it correctly in the first place.
3388 if (curadv != reqadv) {
3389 *lcladv &= ~(ADVERTISE_PAUSE_CAP |
3390 ADVERTISE_PAUSE_ASYM);
3391 tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
3398 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
3400 int current_link_up;
3402 u32 lcl_adv, rmt_adv;
3410 (MAC_STATUS_SYNC_CHANGED |
3411 MAC_STATUS_CFG_CHANGED |
3412 MAC_STATUS_MI_COMPLETION |
3413 MAC_STATUS_LNKSTATE_CHANGED));
3416 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
3418 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
3422 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
3424 /* Some third-party PHYs need to be reset on link going
3427 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3428 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
3429 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
3430 netif_carrier_ok(tp->dev)) {
3431 tg3_readphy(tp, MII_BMSR, &bmsr);
3432 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3433 !(bmsr & BMSR_LSTATUS))
3439 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
3440 tg3_readphy(tp, MII_BMSR, &bmsr);
3441 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
3442 !tg3_flag(tp, INIT_COMPLETE))
3445 if (!(bmsr & BMSR_LSTATUS)) {
3446 err = tg3_init_5401phy_dsp(tp);
3450 tg3_readphy(tp, MII_BMSR, &bmsr);
3451 for (i = 0; i < 1000; i++) {
3453 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3454 (bmsr & BMSR_LSTATUS)) {
3460 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
3461 TG3_PHY_REV_BCM5401_B0 &&
3462 !(bmsr & BMSR_LSTATUS) &&
3463 tp->link_config.active_speed == SPEED_1000) {
3464 err = tg3_phy_reset(tp);
3466 err = tg3_init_5401phy_dsp(tp);
3471 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3472 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
3473 /* 5701 {A0,B0} CRC bug workaround */
3474 tg3_writephy(tp, 0x15, 0x0a75);
3475 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3476 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
3477 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3480 /* Clear pending interrupts... */
3481 tg3_readphy(tp, MII_TG3_ISTAT, &val);
3482 tg3_readphy(tp, MII_TG3_ISTAT, &val);
3484 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
3485 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
3486 else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
3487 tg3_writephy(tp, MII_TG3_IMASK, ~0);
3489 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3490 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3491 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
3492 tg3_writephy(tp, MII_TG3_EXT_CTRL,
3493 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
3495 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
3498 current_link_up = 0;
3499 current_speed = SPEED_INVALID;
3500 current_duplex = DUPLEX_INVALID;
3502 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
3503 err = tg3_phy_auxctl_read(tp,
3504 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
3506 if (!err && !(val & (1 << 10))) {
3507 tg3_phy_auxctl_write(tp,
3508 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
3515 for (i = 0; i < 100; i++) {
3516 tg3_readphy(tp, MII_BMSR, &bmsr);
3517 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3518 (bmsr & BMSR_LSTATUS))
3523 if (bmsr & BMSR_LSTATUS) {
3526 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
3527 for (i = 0; i < 2000; i++) {
3529 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
3534 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
3539 for (i = 0; i < 200; i++) {
3540 tg3_readphy(tp, MII_BMCR, &bmcr);
3541 if (tg3_readphy(tp, MII_BMCR, &bmcr))
3543 if (bmcr && bmcr != 0x7fff)
3551 tp->link_config.active_speed = current_speed;
3552 tp->link_config.active_duplex = current_duplex;
3554 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3555 if ((bmcr & BMCR_ANENABLE) &&
3556 tg3_copper_is_advertising_all(tp,
3557 tp->link_config.advertising)) {
3558 if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
3560 current_link_up = 1;
3563 if (!(bmcr & BMCR_ANENABLE) &&
3564 tp->link_config.speed == current_speed &&
3565 tp->link_config.duplex == current_duplex &&
3566 tp->link_config.flowctrl ==
3567 tp->link_config.active_flowctrl) {
3568 current_link_up = 1;
3572 if (current_link_up == 1 &&
3573 tp->link_config.active_duplex == DUPLEX_FULL)
3574 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
3578 if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3579 tg3_phy_copper_begin(tp);
3581 tg3_readphy(tp, MII_BMSR, &bmsr);
3582 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
3583 (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
3584 current_link_up = 1;
3587 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
3588 if (current_link_up == 1) {
3589 if (tp->link_config.active_speed == SPEED_100 ||
3590 tp->link_config.active_speed == SPEED_10)
3591 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3593 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3594 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
3595 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3597 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3599 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
3600 if (tp->link_config.active_duplex == DUPLEX_HALF)
3601 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
3603 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
3604 if (current_link_up == 1 &&
3605 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
3606 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
3608 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
3611 /* ??? Without this setting Netgear GA302T PHY does not
3612 * ??? send/receive packets...
3614 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
3615 tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
3616 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
3617 tw32_f(MAC_MI_MODE, tp->mi_mode);
3621 tw32_f(MAC_MODE, tp->mac_mode);
3624 tg3_phy_eee_adjust(tp, current_link_up);
3626 if (tg3_flag(tp, USE_LINKCHG_REG)) {
3627 /* Polled via timer. */
3628 tw32_f(MAC_EVENT, 0);
3630 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3634 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
3635 current_link_up == 1 &&
3636 tp->link_config.active_speed == SPEED_1000 &&
3637 (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
3640 (MAC_STATUS_SYNC_CHANGED |
3641 MAC_STATUS_CFG_CHANGED));
3644 NIC_SRAM_FIRMWARE_MBOX,
3645 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
3648 /* Prevent send BD corruption. */
3649 if (tg3_flag(tp, CLKREQ_BUG)) {
3650 u16 oldlnkctl, newlnkctl;
3652 pci_read_config_word(tp->pdev,
3653 pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
3655 if (tp->link_config.active_speed == SPEED_100 ||
3656 tp->link_config.active_speed == SPEED_10)
3657 newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
3659 newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
3660 if (newlnkctl != oldlnkctl)
3661 pci_write_config_word(tp->pdev,
3662 pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
3666 if (current_link_up != netif_carrier_ok(tp->dev)) {
3667 if (current_link_up)
3668 netif_carrier_on(tp->dev);
3670 netif_carrier_off(tp->dev);
3671 tg3_link_report(tp);
3677 struct tg3_fiber_aneginfo {
3679 #define ANEG_STATE_UNKNOWN 0
3680 #define ANEG_STATE_AN_ENABLE 1
3681 #define ANEG_STATE_RESTART_INIT 2
3682 #define ANEG_STATE_RESTART 3
3683 #define ANEG_STATE_DISABLE_LINK_OK 4
3684 #define ANEG_STATE_ABILITY_DETECT_INIT 5
3685 #define ANEG_STATE_ABILITY_DETECT 6
3686 #define ANEG_STATE_ACK_DETECT_INIT 7
3687 #define ANEG_STATE_ACK_DETECT 8
3688 #define ANEG_STATE_COMPLETE_ACK_INIT 9
3689 #define ANEG_STATE_COMPLETE_ACK 10
3690 #define ANEG_STATE_IDLE_DETECT_INIT 11
3691 #define ANEG_STATE_IDLE_DETECT 12
3692 #define ANEG_STATE_LINK_OK 13
3693 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
3694 #define ANEG_STATE_NEXT_PAGE_WAIT 15
3697 #define MR_AN_ENABLE 0x00000001
3698 #define MR_RESTART_AN 0x00000002
3699 #define MR_AN_COMPLETE 0x00000004
3700 #define MR_PAGE_RX 0x00000008
3701 #define MR_NP_LOADED 0x00000010
3702 #define MR_TOGGLE_TX 0x00000020
3703 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
3704 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
3705 #define MR_LP_ADV_SYM_PAUSE 0x00000100
3706 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
3707 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
3708 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
3709 #define MR_LP_ADV_NEXT_PAGE 0x00001000
3710 #define MR_TOGGLE_RX 0x00002000
3711 #define MR_NP_RX 0x00004000
3713 #define MR_LINK_OK 0x80000000
3715 unsigned long link_time, cur_time;
3717 u32 ability_match_cfg;
3718 int ability_match_count;
3720 char ability_match, idle_match, ack_match;
3722 u32 txconfig, rxconfig;
3723 #define ANEG_CFG_NP 0x00000080
3724 #define ANEG_CFG_ACK 0x00000040
3725 #define ANEG_CFG_RF2 0x00000020
3726 #define ANEG_CFG_RF1 0x00000010
3727 #define ANEG_CFG_PS2 0x00000001
3728 #define ANEG_CFG_PS1 0x00008000
3729 #define ANEG_CFG_HD 0x00004000
3730 #define ANEG_CFG_FD 0x00002000
3731 #define ANEG_CFG_INVAL 0x00001f06
3736 #define ANEG_TIMER_ENAB 2
3737 #define ANEG_FAILED -1
3739 #define ANEG_STATE_SETTLE_TIME 10000
3741 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
3742 struct tg3_fiber_aneginfo *ap)
3745 unsigned long delta;
3749 if (ap->state == ANEG_STATE_UNKNOWN) {
3753 ap->ability_match_cfg = 0;
3754 ap->ability_match_count = 0;
3755 ap->ability_match = 0;
3761 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
3762 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
3764 if (rx_cfg_reg != ap->ability_match_cfg) {
3765 ap->ability_match_cfg = rx_cfg_reg;
3766 ap->ability_match = 0;
3767 ap->ability_match_count = 0;
3769 if (++ap->ability_match_count > 1) {
3770 ap->ability_match = 1;
3771 ap->ability_match_cfg = rx_cfg_reg;
3774 if (rx_cfg_reg & ANEG_CFG_ACK)
3782 ap->ability_match_cfg = 0;
3783 ap->ability_match_count = 0;
3784 ap->ability_match = 0;
3790 ap->rxconfig = rx_cfg_reg;
3793 switch (ap->state) {
3794 case ANEG_STATE_UNKNOWN:
3795 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
3796 ap->state = ANEG_STATE_AN_ENABLE;
3799 case ANEG_STATE_AN_ENABLE:
3800 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
3801 if (ap->flags & MR_AN_ENABLE) {
3804 ap->ability_match_cfg = 0;
3805 ap->ability_match_count = 0;
3806 ap->ability_match = 0;
3810 ap->state = ANEG_STATE_RESTART_INIT;
3812 ap->state = ANEG_STATE_DISABLE_LINK_OK;
3816 case ANEG_STATE_RESTART_INIT:
3817 ap->link_time = ap->cur_time;
3818 ap->flags &= ~(MR_NP_LOADED);
3820 tw32(MAC_TX_AUTO_NEG, 0);
3821 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3822 tw32_f(MAC_MODE, tp->mac_mode);
3825 ret = ANEG_TIMER_ENAB;
3826 ap->state = ANEG_STATE_RESTART;
3829 case ANEG_STATE_RESTART:
3830 delta = ap->cur_time - ap->link_time;
3831 if (delta > ANEG_STATE_SETTLE_TIME)
3832 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
3834 ret = ANEG_TIMER_ENAB;
3837 case ANEG_STATE_DISABLE_LINK_OK:
3841 case ANEG_STATE_ABILITY_DETECT_INIT:
3842 ap->flags &= ~(MR_TOGGLE_TX);
3843 ap->txconfig = ANEG_CFG_FD;
3844 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3845 if (flowctrl & ADVERTISE_1000XPAUSE)
3846 ap->txconfig |= ANEG_CFG_PS1;
3847 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3848 ap->txconfig |= ANEG_CFG_PS2;
3849 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3850 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3851 tw32_f(MAC_MODE, tp->mac_mode);
3854 ap->state = ANEG_STATE_ABILITY_DETECT;
3857 case ANEG_STATE_ABILITY_DETECT:
3858 if (ap->ability_match != 0 && ap->rxconfig != 0)
3859 ap->state = ANEG_STATE_ACK_DETECT_INIT;
3862 case ANEG_STATE_ACK_DETECT_INIT:
3863 ap->txconfig |= ANEG_CFG_ACK;
3864 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3865 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3866 tw32_f(MAC_MODE, tp->mac_mode);
3869 ap->state = ANEG_STATE_ACK_DETECT;
3872 case ANEG_STATE_ACK_DETECT:
3873 if (ap->ack_match != 0) {
3874 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
3875 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
3876 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
3878 ap->state = ANEG_STATE_AN_ENABLE;
3880 } else if (ap->ability_match != 0 &&
3881 ap->rxconfig == 0) {
3882 ap->state = ANEG_STATE_AN_ENABLE;
3886 case ANEG_STATE_COMPLETE_ACK_INIT:
3887 if (ap->rxconfig & ANEG_CFG_INVAL) {
3891 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
3892 MR_LP_ADV_HALF_DUPLEX |
3893 MR_LP_ADV_SYM_PAUSE |
3894 MR_LP_ADV_ASYM_PAUSE |
3895 MR_LP_ADV_REMOTE_FAULT1 |
3896 MR_LP_ADV_REMOTE_FAULT2 |
3897 MR_LP_ADV_NEXT_PAGE |
3900 if (ap->rxconfig & ANEG_CFG_FD)
3901 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
3902 if (ap->rxconfig & ANEG_CFG_HD)
3903 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
3904 if (ap->rxconfig & ANEG_CFG_PS1)
3905 ap->flags |= MR_LP_ADV_SYM_PAUSE;
3906 if (ap->rxconfig & ANEG_CFG_PS2)
3907 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
3908 if (ap->rxconfig & ANEG_CFG_RF1)
3909 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
3910 if (ap->rxconfig & ANEG_CFG_RF2)
3911 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
3912 if (ap->rxconfig & ANEG_CFG_NP)
3913 ap->flags |= MR_LP_ADV_NEXT_PAGE;
3915 ap->link_time = ap->cur_time;
3917 ap->flags ^= (MR_TOGGLE_TX);
3918 if (ap->rxconfig & 0x0008)
3919 ap->flags |= MR_TOGGLE_RX;
3920 if (ap->rxconfig & ANEG_CFG_NP)
3921 ap->flags |= MR_NP_RX;
3922 ap->flags |= MR_PAGE_RX;
3924 ap->state = ANEG_STATE_COMPLETE_ACK;
3925 ret = ANEG_TIMER_ENAB;
3928 case ANEG_STATE_COMPLETE_ACK:
3929 if (ap->ability_match != 0 &&
3930 ap->rxconfig == 0) {
3931 ap->state = ANEG_STATE_AN_ENABLE;
3934 delta = ap->cur_time - ap->link_time;
3935 if (delta > ANEG_STATE_SETTLE_TIME) {
3936 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
3937 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3939 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
3940 !(ap->flags & MR_NP_RX)) {
3941 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3949 case ANEG_STATE_IDLE_DETECT_INIT:
3950 ap->link_time = ap->cur_time;
3951 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3952 tw32_f(MAC_MODE, tp->mac_mode);
3955 ap->state = ANEG_STATE_IDLE_DETECT;
3956 ret = ANEG_TIMER_ENAB;
3959 case ANEG_STATE_IDLE_DETECT:
3960 if (ap->ability_match != 0 &&
3961 ap->rxconfig == 0) {
3962 ap->state = ANEG_STATE_AN_ENABLE;
3965 delta = ap->cur_time - ap->link_time;
3966 if (delta > ANEG_STATE_SETTLE_TIME) {
3967 /* XXX another gem from the Broadcom driver :( */
3968 ap->state = ANEG_STATE_LINK_OK;
3972 case ANEG_STATE_LINK_OK:
3973 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
3977 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
3978 /* ??? unimplemented */
3981 case ANEG_STATE_NEXT_PAGE_WAIT:
3982 /* ??? unimplemented */
3993 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
3996 struct tg3_fiber_aneginfo aninfo;
3997 int status = ANEG_FAILED;
4001 tw32_f(MAC_TX_AUTO_NEG, 0);
4003 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
4004 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
4007 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
4010 memset(&aninfo, 0, sizeof(aninfo));
4011 aninfo.flags |= MR_AN_ENABLE;
4012 aninfo.state = ANEG_STATE_UNKNOWN;
4013 aninfo.cur_time = 0;
4015 while (++tick < 195000) {
4016 status = tg3_fiber_aneg_smachine(tp, &aninfo);
4017 if (status == ANEG_DONE || status == ANEG_FAILED)
4023 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4024 tw32_f(MAC_MODE, tp->mac_mode);
4027 *txflags = aninfo.txconfig;
4028 *rxflags = aninfo.flags;
4030 if (status == ANEG_DONE &&
4031 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
4032 MR_LP_ADV_FULL_DUPLEX)))
4038 static void tg3_init_bcm8002(struct tg3 *tp)
4040 u32 mac_status = tr32(MAC_STATUS);
4043 /* Reset when initting first time or we have a link. */
4044 if (tg3_flag(tp, INIT_COMPLETE) &&
4045 !(mac_status & MAC_STATUS_PCS_SYNCED))
4048 /* Set PLL lock range. */
4049 tg3_writephy(tp, 0x16, 0x8007);
4052 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
4054 /* Wait for reset to complete. */
4055 /* XXX schedule_timeout() ... */
4056 for (i = 0; i < 500; i++)
4059 /* Config mode; select PMA/Ch 1 regs. */
4060 tg3_writephy(tp, 0x10, 0x8411);
4062 /* Enable auto-lock and comdet, select txclk for tx. */
4063 tg3_writephy(tp, 0x11, 0x0a10);
4065 tg3_writephy(tp, 0x18, 0x00a0);
4066 tg3_writephy(tp, 0x16, 0x41ff);
4068 /* Assert and deassert POR. */
4069 tg3_writephy(tp, 0x13, 0x0400);
4071 tg3_writephy(tp, 0x13, 0x0000);
4073 tg3_writephy(tp, 0x11, 0x0a50);
4075 tg3_writephy(tp, 0x11, 0x0a10);
4077 /* Wait for signal to stabilize */
4078 /* XXX schedule_timeout() ... */
4079 for (i = 0; i < 15000; i++)
4082 /* Deselect the channel register so we can read the PHYID
4085 tg3_writephy(tp, 0x10, 0x8011);
4088 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
4091 u32 sg_dig_ctrl, sg_dig_status;
4092 u32 serdes_cfg, expected_sg_dig_ctrl;
4093 int workaround, port_a;
4094 int current_link_up;
4097 expected_sg_dig_ctrl = 0;
4100 current_link_up = 0;
4102 if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
4103 tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
4105 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
4108 /* preserve bits 0-11,13,14 for signal pre-emphasis */
4109 /* preserve bits 20-23 for voltage regulator */
4110 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
4113 sg_dig_ctrl = tr32(SG_DIG_CTRL);
4115 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
4116 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
4118 u32 val = serdes_cfg;
4124 tw32_f(MAC_SERDES_CFG, val);
4127 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4129 if (mac_status & MAC_STATUS_PCS_SYNCED) {
4130 tg3_setup_flow_control(tp, 0, 0);
4131 current_link_up = 1;
4136 /* Want auto-negotiation. */
4137 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
4139 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4140 if (flowctrl & ADVERTISE_1000XPAUSE)
4141 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
4142 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4143 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
4145 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
4146 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
4147 tp->serdes_counter &&
4148 ((mac_status & (MAC_STATUS_PCS_SYNCED |
4149 MAC_STATUS_RCVD_CFG)) ==
4150 MAC_STATUS_PCS_SYNCED)) {
4151 tp->serdes_counter--;
4152 current_link_up = 1;
4157 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
4158 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
4160 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
4162 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4163 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4164 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
4165 MAC_STATUS_SIGNAL_DET)) {
4166 sg_dig_status = tr32(SG_DIG_STATUS);
4167 mac_status = tr32(MAC_STATUS);
4169 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
4170 (mac_status & MAC_STATUS_PCS_SYNCED)) {
4171 u32 local_adv = 0, remote_adv = 0;
4173 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
4174 local_adv |= ADVERTISE_1000XPAUSE;
4175 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
4176 local_adv |= ADVERTISE_1000XPSE_ASYM;
4178 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
4179 remote_adv |= LPA_1000XPAUSE;
4180 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
4181 remote_adv |= LPA_1000XPAUSE_ASYM;
4183 tg3_setup_flow_control(tp, local_adv, remote_adv);
4184 current_link_up = 1;
4185 tp->serdes_counter = 0;
4186 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4187 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
4188 if (tp->serdes_counter)
4189 tp->serdes_counter--;
4192 u32 val = serdes_cfg;
4199 tw32_f(MAC_SERDES_CFG, val);
4202 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4205 /* Link parallel detection - link is up */
4206 /* only if we have PCS_SYNC and not */
4207 /* receiving config code words */
4208 mac_status = tr32(MAC_STATUS);
4209 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
4210 !(mac_status & MAC_STATUS_RCVD_CFG)) {
4211 tg3_setup_flow_control(tp, 0, 0);
4212 current_link_up = 1;
4214 TG3_PHYFLG_PARALLEL_DETECT;
4215 tp->serdes_counter =
4216 SERDES_PARALLEL_DET_TIMEOUT;
4218 goto restart_autoneg;
4222 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4223 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4227 return current_link_up;
4230 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
4232 int current_link_up = 0;
4234 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
4237 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4238 u32 txflags, rxflags;
4241 if (fiber_autoneg(tp, &txflags, &rxflags)) {
4242 u32 local_adv = 0, remote_adv = 0;
4244 if (txflags & ANEG_CFG_PS1)
4245 local_adv |= ADVERTISE_1000XPAUSE;
4246 if (txflags & ANEG_CFG_PS2)
4247 local_adv |= ADVERTISE_1000XPSE_ASYM;
4249 if (rxflags & MR_LP_ADV_SYM_PAUSE)
4250 remote_adv |= LPA_1000XPAUSE;
4251 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
4252 remote_adv |= LPA_1000XPAUSE_ASYM;
4254 tg3_setup_flow_control(tp, local_adv, remote_adv);
4256 current_link_up = 1;
4258 for (i = 0; i < 30; i++) {
4261 (MAC_STATUS_SYNC_CHANGED |
4262 MAC_STATUS_CFG_CHANGED));
4264 if ((tr32(MAC_STATUS) &
4265 (MAC_STATUS_SYNC_CHANGED |
4266 MAC_STATUS_CFG_CHANGED)) == 0)
4270 mac_status = tr32(MAC_STATUS);
4271 if (current_link_up == 0 &&
4272 (mac_status & MAC_STATUS_PCS_SYNCED) &&
4273 !(mac_status & MAC_STATUS_RCVD_CFG))
4274 current_link_up = 1;
4276 tg3_setup_flow_control(tp, 0, 0);
4278 /* Forcing 1000FD link up. */
4279 current_link_up = 1;
4281 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
4284 tw32_f(MAC_MODE, tp->mac_mode);
4289 return current_link_up;
4292 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
4295 u16 orig_active_speed;
4296 u8 orig_active_duplex;
4298 int current_link_up;
4301 orig_pause_cfg = tp->link_config.active_flowctrl;
4302 orig_active_speed = tp->link_config.active_speed;
4303 orig_active_duplex = tp->link_config.active_duplex;
4305 if (!tg3_flag(tp, HW_AUTONEG) &&
4306 netif_carrier_ok(tp->dev) &&
4307 tg3_flag(tp, INIT_COMPLETE)) {
4308 mac_status = tr32(MAC_STATUS);
4309 mac_status &= (MAC_STATUS_PCS_SYNCED |
4310 MAC_STATUS_SIGNAL_DET |
4311 MAC_STATUS_CFG_CHANGED |
4312 MAC_STATUS_RCVD_CFG);
4313 if (mac_status == (MAC_STATUS_PCS_SYNCED |
4314 MAC_STATUS_SIGNAL_DET)) {
4315 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4316 MAC_STATUS_CFG_CHANGED));
4321 tw32_f(MAC_TX_AUTO_NEG, 0);
4323 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
4324 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
4325 tw32_f(MAC_MODE, tp->mac_mode);
4328 if (tp->phy_id == TG3_PHY_ID_BCM8002)
4329 tg3_init_bcm8002(tp);
4331 /* Enable link change event even when serdes polling. */
4332 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4335 current_link_up = 0;
4336 mac_status = tr32(MAC_STATUS);
4338 if (tg3_flag(tp, HW_AUTONEG))
4339 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
4341 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
4343 tp->napi[0].hw_status->status =
4344 (SD_STATUS_UPDATED |
4345 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
4347 for (i = 0; i < 100; i++) {
4348 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4349 MAC_STATUS_CFG_CHANGED));
4351 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
4352 MAC_STATUS_CFG_CHANGED |
4353 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
4357 mac_status = tr32(MAC_STATUS);
4358 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
4359 current_link_up = 0;
4360 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
4361 tp->serdes_counter == 0) {
4362 tw32_f(MAC_MODE, (tp->mac_mode |
4363 MAC_MODE_SEND_CONFIGS));
4365 tw32_f(MAC_MODE, tp->mac_mode);
4369 if (current_link_up == 1) {
4370 tp->link_config.active_speed = SPEED_1000;
4371 tp->link_config.active_duplex = DUPLEX_FULL;
4372 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4373 LED_CTRL_LNKLED_OVERRIDE |
4374 LED_CTRL_1000MBPS_ON));
4376 tp->link_config.active_speed = SPEED_INVALID;
4377 tp->link_config.active_duplex = DUPLEX_INVALID;
4378 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4379 LED_CTRL_LNKLED_OVERRIDE |
4380 LED_CTRL_TRAFFIC_OVERRIDE));
4383 if (current_link_up != netif_carrier_ok(tp->dev)) {
4384 if (current_link_up)
4385 netif_carrier_on(tp->dev);
4387 netif_carrier_off(tp->dev);
4388 tg3_link_report(tp);
4390 u32 now_pause_cfg = tp->link_config.active_flowctrl;
4391 if (orig_pause_cfg != now_pause_cfg ||
4392 orig_active_speed != tp->link_config.active_speed ||
4393 orig_active_duplex != tp->link_config.active_duplex)
4394 tg3_link_report(tp);
4400 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
4402 int current_link_up, err = 0;
4406 u32 local_adv, remote_adv;
4408 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4409 tw32_f(MAC_MODE, tp->mac_mode);
4415 (MAC_STATUS_SYNC_CHANGED |
4416 MAC_STATUS_CFG_CHANGED |
4417 MAC_STATUS_MI_COMPLETION |
4418 MAC_STATUS_LNKSTATE_CHANGED));
4424 current_link_up = 0;
4425 current_speed = SPEED_INVALID;
4426 current_duplex = DUPLEX_INVALID;
4428 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4429 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4430 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
4431 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4432 bmsr |= BMSR_LSTATUS;
4434 bmsr &= ~BMSR_LSTATUS;
4437 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
4439 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
4440 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
4441 /* do nothing, just check for link up at the end */
4442 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4445 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4446 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
4447 ADVERTISE_1000XPAUSE |
4448 ADVERTISE_1000XPSE_ASYM |
4451 new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4453 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
4454 new_adv |= ADVERTISE_1000XHALF;
4455 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
4456 new_adv |= ADVERTISE_1000XFULL;
4458 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
4459 tg3_writephy(tp, MII_ADVERTISE, new_adv);
4460 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
4461 tg3_writephy(tp, MII_BMCR, bmcr);
4463 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4464 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
4465 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4472 bmcr &= ~BMCR_SPEED1000;
4473 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
4475 if (tp->link_config.duplex == DUPLEX_FULL)
4476 new_bmcr |= BMCR_FULLDPLX;
4478 if (new_bmcr != bmcr) {
4479 /* BMCR_SPEED1000 is a reserved bit that needs
4480 * to be set on write.
4482 new_bmcr |= BMCR_SPEED1000;
4484 /* Force a linkdown */
4485 if (netif_carrier_ok(tp->dev)) {
4488 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4489 adv &= ~(ADVERTISE_1000XFULL |
4490 ADVERTISE_1000XHALF |
4492 tg3_writephy(tp, MII_ADVERTISE, adv);
4493 tg3_writephy(tp, MII_BMCR, bmcr |
4497 netif_carrier_off(tp->dev);
4499 tg3_writephy(tp, MII_BMCR, new_bmcr);
4501 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4502 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4503 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
4505 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4506 bmsr |= BMSR_LSTATUS;
4508 bmsr &= ~BMSR_LSTATUS;
4510 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4514 if (bmsr & BMSR_LSTATUS) {
4515 current_speed = SPEED_1000;
4516 current_link_up = 1;
4517 if (bmcr & BMCR_FULLDPLX)
4518 current_duplex = DUPLEX_FULL;
4520 current_duplex = DUPLEX_HALF;
4525 if (bmcr & BMCR_ANENABLE) {
4528 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
4529 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
4530 common = local_adv & remote_adv;
4531 if (common & (ADVERTISE_1000XHALF |
4532 ADVERTISE_1000XFULL)) {
4533 if (common & ADVERTISE_1000XFULL)
4534 current_duplex = DUPLEX_FULL;
4536 current_duplex = DUPLEX_HALF;
4537 } else if (!tg3_flag(tp, 5780_CLASS)) {
4538 /* Link is up via parallel detect */
4540 current_link_up = 0;
4545 if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
4546 tg3_setup_flow_control(tp, local_adv, remote_adv);
4548 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4549 if (tp->link_config.active_duplex == DUPLEX_HALF)
4550 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4552 tw32_f(MAC_MODE, tp->mac_mode);
4555 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4557 tp->link_config.active_speed = current_speed;
4558 tp->link_config.active_duplex = current_duplex;
4560 if (current_link_up != netif_carrier_ok(tp->dev)) {
4561 if (current_link_up)
4562 netif_carrier_on(tp->dev);
4564 netif_carrier_off(tp->dev);
4565 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4567 tg3_link_report(tp);
4572 static void tg3_serdes_parallel_detect(struct tg3 *tp)
4574 if (tp->serdes_counter) {
4575 /* Give autoneg time to complete. */
4576 tp->serdes_counter--;
4580 if (!netif_carrier_ok(tp->dev) &&
4581 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
4584 tg3_readphy(tp, MII_BMCR, &bmcr);
4585 if (bmcr & BMCR_ANENABLE) {
4588 /* Select shadow register 0x1f */
4589 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
4590 tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
4592 /* Select expansion interrupt status register */
4593 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
4594 MII_TG3_DSP_EXP1_INT_STAT);
4595 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4596 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4598 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
4599 /* We have signal detect and not receiving
4600 * config code words, link is up by parallel
4604 bmcr &= ~BMCR_ANENABLE;
4605 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
4606 tg3_writephy(tp, MII_BMCR, bmcr);
4607 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
4610 } else if (netif_carrier_ok(tp->dev) &&
4611 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
4612 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
4615 /* Select expansion interrupt status register */
4616 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
4617 MII_TG3_DSP_EXP1_INT_STAT);
4618 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4622 /* Config code words received, turn on autoneg. */
4623 tg3_readphy(tp, MII_BMCR, &bmcr);
4624 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
4626 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4632 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
4637 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
4638 err = tg3_setup_fiber_phy(tp, force_reset);
4639 else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4640 err = tg3_setup_fiber_mii_phy(tp, force_reset);
4642 err = tg3_setup_copper_phy(tp, force_reset);
4644 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
4647 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
4648 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
4650 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
4655 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
4656 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
4657 tw32(GRC_MISC_CFG, val);
4660 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
4661 (6 << TX_LENGTHS_IPG_SHIFT);
4662 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
4663 val |= tr32(MAC_TX_LENGTHS) &
4664 (TX_LENGTHS_JMB_FRM_LEN_MSK |
4665 TX_LENGTHS_CNT_DWN_VAL_MSK);
4667 if (tp->link_config.active_speed == SPEED_1000 &&
4668 tp->link_config.active_duplex == DUPLEX_HALF)
4669 tw32(MAC_TX_LENGTHS, val |
4670 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
4672 tw32(MAC_TX_LENGTHS, val |
4673 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
4675 if (!tg3_flag(tp, 5705_PLUS)) {
4676 if (netif_carrier_ok(tp->dev)) {
4677 tw32(HOSTCC_STAT_COAL_TICKS,
4678 tp->coal.stats_block_coalesce_usecs);
4680 tw32(HOSTCC_STAT_COAL_TICKS, 0);
4684 if (tg3_flag(tp, ASPM_WORKAROUND)) {
4685 val = tr32(PCIE_PWR_MGMT_THRESH);
4686 if (!netif_carrier_ok(tp->dev))
4687 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
4690 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
4691 tw32(PCIE_PWR_MGMT_THRESH, val);
4697 static inline int tg3_irq_sync(struct tg3 *tp)
4699 return tp->irq_sync;
4702 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
4706 dst = (u32 *)((u8 *)dst + off);
4707 for (i = 0; i < len; i += sizeof(u32))
4708 *dst++ = tr32(off + i);
4711 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
4713 tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
4714 tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
4715 tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
4716 tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
4717 tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
4718 tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
4719 tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
4720 tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
4721 tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
4722 tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
4723 tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
4724 tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
4725 tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
4726 tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
4727 tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
4728 tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
4729 tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
4730 tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
4731 tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
4733 if (tg3_flag(tp, SUPPORT_MSIX))
4734 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
4736 tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
4737 tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
4738 tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
4739 tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
4740 tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
4741 tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
4742 tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
4743 tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
4745 if (!tg3_flag(tp, 5705_PLUS)) {
4746 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
4747 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
4748 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
4751 tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
4752 tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
4753 tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
4754 tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
4755 tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
4757 if (tg3_flag(tp, NVRAM))
4758 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
4761 static void tg3_dump_state(struct tg3 *tp)
4766 regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
4768 netdev_err(tp->dev, "Failed allocating register dump buffer\n");
4772 if (tg3_flag(tp, PCI_EXPRESS)) {
4773 /* Read up to but not including private PCI registers */
4774 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
4775 regs[i / sizeof(u32)] = tr32(i);
4777 tg3_dump_legacy_regs(tp, regs);
4779 for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
4780 if (!regs[i + 0] && !regs[i + 1] &&
4781 !regs[i + 2] && !regs[i + 3])
4784 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
4786 regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
4791 for (i = 0; i < tp->irq_cnt; i++) {
4792 struct tg3_napi *tnapi = &tp->napi[i];
4794 /* SW status block */
4796 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
4798 tnapi->hw_status->status,
4799 tnapi->hw_status->status_tag,
4800 tnapi->hw_status->rx_jumbo_consumer,
4801 tnapi->hw_status->rx_consumer,
4802 tnapi->hw_status->rx_mini_consumer,
4803 tnapi->hw_status->idx[0].rx_producer,
4804 tnapi->hw_status->idx[0].tx_consumer);
4807 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
4809 tnapi->last_tag, tnapi->last_irq_tag,
4810 tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
4812 tnapi->prodring.rx_std_prod_idx,
4813 tnapi->prodring.rx_std_cons_idx,
4814 tnapi->prodring.rx_jmb_prod_idx,
4815 tnapi->prodring.rx_jmb_cons_idx);
4819 /* This is called whenever we suspect that the system chipset is re-
4820 * ordering the sequence of MMIO to the tx send mailbox. The symptom
4821 * is bogus tx completions. We try to recover by setting the
4822 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
4825 static void tg3_tx_recover(struct tg3 *tp)
4827 BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
4828 tp->write32_tx_mbox == tg3_write_indirect_mbox);
4830 netdev_warn(tp->dev,
4831 "The system may be re-ordering memory-mapped I/O "
4832 "cycles to the network device, attempting to recover. "
4833 "Please report the problem to the driver maintainer "
4834 "and include system chipset information.\n");
4836 spin_lock(&tp->lock);
4837 tg3_flag_set(tp, TX_RECOVERY_PENDING);
4838 spin_unlock(&tp->lock);
4841 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
4843 /* Tell compiler to fetch tx indices from memory. */
4845 return tnapi->tx_pending -
4846 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
4849 /* Tigon3 never reports partial packet sends. So we do not
4850 * need special logic to handle SKBs that have not had all
4851 * of their frags sent yet, like SunGEM does.
4853 static void tg3_tx(struct tg3_napi *tnapi)
4855 struct tg3 *tp = tnapi->tp;
4856 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
4857 u32 sw_idx = tnapi->tx_cons;
4858 struct netdev_queue *txq;
4859 int index = tnapi - tp->napi;
4861 if (tg3_flag(tp, ENABLE_TSS))
4864 txq = netdev_get_tx_queue(tp->dev, index);
4866 while (sw_idx != hw_idx) {
4867 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
4868 struct sk_buff *skb = ri->skb;
4871 if (unlikely(skb == NULL)) {
4876 pci_unmap_single(tp->pdev,
4877 dma_unmap_addr(ri, mapping),
4883 while (ri->fragmented) {
4884 ri->fragmented = false;
4885 sw_idx = NEXT_TX(sw_idx);
4886 ri = &tnapi->tx_buffers[sw_idx];
4889 sw_idx = NEXT_TX(sw_idx);
4891 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4892 ri = &tnapi->tx_buffers[sw_idx];
4893 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
4896 pci_unmap_page(tp->pdev,
4897 dma_unmap_addr(ri, mapping),
4898 skb_shinfo(skb)->frags[i].size,
4901 while (ri->fragmented) {
4902 ri->fragmented = false;
4903 sw_idx = NEXT_TX(sw_idx);
4904 ri = &tnapi->tx_buffers[sw_idx];
4907 sw_idx = NEXT_TX(sw_idx);
4912 if (unlikely(tx_bug)) {
4918 tnapi->tx_cons = sw_idx;
4920 /* Need to make the tx_cons update visible to tg3_start_xmit()
4921 * before checking for netif_queue_stopped(). Without the
4922 * memory barrier, there is a small possibility that tg3_start_xmit()
4923 * will miss it and cause the queue to be stopped forever.
4927 if (unlikely(netif_tx_queue_stopped(txq) &&
4928 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
4929 __netif_tx_lock(txq, smp_processor_id());
4930 if (netif_tx_queue_stopped(txq) &&
4931 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
4932 netif_tx_wake_queue(txq);
4933 __netif_tx_unlock(txq);
4937 static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
4942 pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
4943 map_sz, PCI_DMA_FROMDEVICE);
4944 dev_kfree_skb_any(ri->skb);
4948 /* Returns size of skb allocated or < 0 on error.
4950 * We only need to fill in the address because the other members
4951 * of the RX descriptor are invariant, see tg3_init_rings.
4953 * Note the purposeful assymetry of cpu vs. chip accesses. For
4954 * posting buffers we only dirty the first cache line of the RX
4955 * descriptor (containing the address). Whereas for the RX status
4956 * buffers the cpu only reads the last cacheline of the RX descriptor
4957 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
4959 static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
4960 u32 opaque_key, u32 dest_idx_unmasked)
4962 struct tg3_rx_buffer_desc *desc;
4963 struct ring_info *map;
4964 struct sk_buff *skb;
4966 int skb_size, dest_idx;
4968 switch (opaque_key) {
4969 case RXD_OPAQUE_RING_STD:
4970 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
4971 desc = &tpr->rx_std[dest_idx];
4972 map = &tpr->rx_std_buffers[dest_idx];
4973 skb_size = tp->rx_pkt_map_sz;
4976 case RXD_OPAQUE_RING_JUMBO:
4977 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
4978 desc = &tpr->rx_jmb[dest_idx].std;
4979 map = &tpr->rx_jmb_buffers[dest_idx];
4980 skb_size = TG3_RX_JMB_MAP_SZ;
4987 /* Do not overwrite any of the map or rp information
4988 * until we are sure we can commit to a new buffer.
4990 * Callers depend upon this behavior and assume that
4991 * we leave everything unchanged if we fail.
4993 skb = netdev_alloc_skb(tp->dev, skb_size + TG3_RX_OFFSET(tp));
4997 skb_reserve(skb, TG3_RX_OFFSET(tp));
4999 mapping = pci_map_single(tp->pdev, skb->data, skb_size,
5000 PCI_DMA_FROMDEVICE);
5001 if (pci_dma_mapping_error(tp->pdev, mapping)) {
5007 dma_unmap_addr_set(map, mapping, mapping);
5009 desc->addr_hi = ((u64)mapping >> 32);
5010 desc->addr_lo = ((u64)mapping & 0xffffffff);
5015 /* We only need to move over in the address because the other
5016 * members of the RX descriptor are invariant. See notes above
5017 * tg3_alloc_rx_skb for full details.
5019 static void tg3_recycle_rx(struct tg3_napi *tnapi,
5020 struct tg3_rx_prodring_set *dpr,
5021 u32 opaque_key, int src_idx,
5022 u32 dest_idx_unmasked)
5024 struct tg3 *tp = tnapi->tp;
5025 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
5026 struct ring_info *src_map, *dest_map;
5027 struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
5030 switch (opaque_key) {
5031 case RXD_OPAQUE_RING_STD:
5032 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
5033 dest_desc = &dpr->rx_std[dest_idx];
5034 dest_map = &dpr->rx_std_buffers[dest_idx];
5035 src_desc = &spr->rx_std[src_idx];
5036 src_map = &spr->rx_std_buffers[src_idx];
5039 case RXD_OPAQUE_RING_JUMBO:
5040 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
5041 dest_desc = &dpr->rx_jmb[dest_idx].std;
5042 dest_map = &dpr->rx_jmb_buffers[dest_idx];
5043 src_desc = &spr->rx_jmb[src_idx].std;
5044 src_map = &spr->rx_jmb_buffers[src_idx];
5051 dest_map->skb = src_map->skb;
5052 dma_unmap_addr_set(dest_map, mapping,
5053 dma_unmap_addr(src_map, mapping));
5054 dest_desc->addr_hi = src_desc->addr_hi;
5055 dest_desc->addr_lo = src_desc->addr_lo;
5057 /* Ensure that the update to the skb happens after the physical
5058 * addresses have been transferred to the new BD location.
5062 src_map->skb = NULL;
5065 /* The RX ring scheme is composed of multiple rings which post fresh
5066 * buffers to the chip, and one special ring the chip uses to report
5067 * status back to the host.
5069 * The special ring reports the status of received packets to the
5070 * host. The chip does not write into the original descriptor the
5071 * RX buffer was obtained from. The chip simply takes the original
5072 * descriptor as provided by the host, updates the status and length
5073 * field, then writes this into the next status ring entry.
5075 * Each ring the host uses to post buffers to the chip is described
5076 * by a TG3_BDINFO entry in the chips SRAM area. When a packet arrives,
5077 * it is first placed into the on-chip ram. When the packet's length
5078 * is known, it walks down the TG3_BDINFO entries to select the ring.
5079 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
5080 * which is within the range of the new packet's length is chosen.
5082 * The "separate ring for rx status" scheme may sound queer, but it makes
5083 * sense from a cache coherency perspective. If only the host writes
5084 * to the buffer post rings, and only the chip writes to the rx status
5085 * rings, then cache lines never move beyond shared-modified state.
5086 * If both the host and chip were to write into the same ring, cache line
5087 * eviction could occur since both entities want it in an exclusive state.
5089 static int tg3_rx(struct tg3_napi *tnapi, int budget)
5091 struct tg3 *tp = tnapi->tp;
5092 u32 work_mask, rx_std_posted = 0;
5093 u32 std_prod_idx, jmb_prod_idx;
5094 u32 sw_idx = tnapi->rx_rcb_ptr;
5097 struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
5099 hw_idx = *(tnapi->rx_rcb_prod_idx);
5101 * We need to order the read of hw_idx and the read of
5102 * the opaque cookie.
5107 std_prod_idx = tpr->rx_std_prod_idx;
5108 jmb_prod_idx = tpr->rx_jmb_prod_idx;
5109 while (sw_idx != hw_idx && budget > 0) {
5110 struct ring_info *ri;
5111 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
5113 struct sk_buff *skb;
5114 dma_addr_t dma_addr;
5115 u32 opaque_key, desc_idx, *post_ptr;
5117 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
5118 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
5119 if (opaque_key == RXD_OPAQUE_RING_STD) {
5120 ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
5121 dma_addr = dma_unmap_addr(ri, mapping);
5123 post_ptr = &std_prod_idx;
5125 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
5126 ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
5127 dma_addr = dma_unmap_addr(ri, mapping);
5129 post_ptr = &jmb_prod_idx;
5131 goto next_pkt_nopost;
5133 work_mask |= opaque_key;
5135 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
5136 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
5138 tg3_recycle_rx(tnapi, tpr, opaque_key,
5139 desc_idx, *post_ptr);
5141 /* Other statistics kept track of by card. */
5146 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
5149 if (len > TG3_RX_COPY_THRESH(tp)) {
5152 skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key,
5157 pci_unmap_single(tp->pdev, dma_addr, skb_size,
5158 PCI_DMA_FROMDEVICE);
5160 /* Ensure that the update to the skb happens
5161 * after the usage of the old DMA mapping.
5169 struct sk_buff *copy_skb;
5171 tg3_recycle_rx(tnapi, tpr, opaque_key,
5172 desc_idx, *post_ptr);
5174 copy_skb = netdev_alloc_skb(tp->dev, len +
5176 if (copy_skb == NULL)
5177 goto drop_it_no_recycle;
5179 skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
5180 skb_put(copy_skb, len);
5181 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
5182 skb_copy_from_linear_data(skb, copy_skb->data, len);
5183 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
5185 /* We'll reuse the original ring buffer. */
5189 if ((tp->dev->features & NETIF_F_RXCSUM) &&
5190 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
5191 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
5192 >> RXD_TCPCSUM_SHIFT) == 0xffff))
5193 skb->ip_summed = CHECKSUM_UNNECESSARY;
5195 skb_checksum_none_assert(skb);
5197 skb->protocol = eth_type_trans(skb, tp->dev);
5199 if (len > (tp->dev->mtu + ETH_HLEN) &&
5200 skb->protocol != htons(ETH_P_8021Q)) {
5202 goto drop_it_no_recycle;
5205 if (desc->type_flags & RXD_FLAG_VLAN &&
5206 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
5207 __vlan_hwaccel_put_tag(skb,
5208 desc->err_vlan & RXD_VLAN_MASK);
5210 napi_gro_receive(&tnapi->napi, skb);
5218 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
5219 tpr->rx_std_prod_idx = std_prod_idx &
5220 tp->rx_std_ring_mask;
5221 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5222 tpr->rx_std_prod_idx);
5223 work_mask &= ~RXD_OPAQUE_RING_STD;
5228 sw_idx &= tp->rx_ret_ring_mask;
5230 /* Refresh hw_idx to see if there is new work */
5231 if (sw_idx == hw_idx) {
5232 hw_idx = *(tnapi->rx_rcb_prod_idx);
5237 /* ACK the status ring. */
5238 tnapi->rx_rcb_ptr = sw_idx;
5239 tw32_rx_mbox(tnapi->consmbox, sw_idx);
5241 /* Refill RX ring(s). */
5242 if (!tg3_flag(tp, ENABLE_RSS)) {
5243 if (work_mask & RXD_OPAQUE_RING_STD) {
5244 tpr->rx_std_prod_idx = std_prod_idx &
5245 tp->rx_std_ring_mask;
5246 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5247 tpr->rx_std_prod_idx);
5249 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
5250 tpr->rx_jmb_prod_idx = jmb_prod_idx &
5251 tp->rx_jmb_ring_mask;
5252 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5253 tpr->rx_jmb_prod_idx);
5256 } else if (work_mask) {
5257 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
5258 * updated before the producer indices can be updated.
5262 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
5263 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
5265 if (tnapi != &tp->napi[1])
5266 napi_schedule(&tp->napi[1].napi);
5272 static void tg3_poll_link(struct tg3 *tp)
5274 /* handle link change and other phy events */
5275 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
5276 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
5278 if (sblk->status & SD_STATUS_LINK_CHG) {
5279 sblk->status = SD_STATUS_UPDATED |
5280 (sblk->status & ~SD_STATUS_LINK_CHG);
5281 spin_lock(&tp->lock);
5282 if (tg3_flag(tp, USE_PHYLIB)) {
5284 (MAC_STATUS_SYNC_CHANGED |
5285 MAC_STATUS_CFG_CHANGED |
5286 MAC_STATUS_MI_COMPLETION |
5287 MAC_STATUS_LNKSTATE_CHANGED));
5290 tg3_setup_phy(tp, 0);
5291 spin_unlock(&tp->lock);
5296 static int tg3_rx_prodring_xfer(struct tg3 *tp,
5297 struct tg3_rx_prodring_set *dpr,
5298 struct tg3_rx_prodring_set *spr)
5300 u32 si, di, cpycnt, src_prod_idx;
5304 src_prod_idx = spr->rx_std_prod_idx;
5306 /* Make sure updates to the rx_std_buffers[] entries and the
5307 * standard producer index are seen in the correct order.
5311 if (spr->rx_std_cons_idx == src_prod_idx)
5314 if (spr->rx_std_cons_idx < src_prod_idx)
5315 cpycnt = src_prod_idx - spr->rx_std_cons_idx;
5317 cpycnt = tp->rx_std_ring_mask + 1 -
5318 spr->rx_std_cons_idx;
5320 cpycnt = min(cpycnt,
5321 tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
5323 si = spr->rx_std_cons_idx;
5324 di = dpr->rx_std_prod_idx;
5326 for (i = di; i < di + cpycnt; i++) {
5327 if (dpr->rx_std_buffers[i].skb) {
5337 /* Ensure that updates to the rx_std_buffers ring and the
5338 * shadowed hardware producer ring from tg3_recycle_skb() are
5339 * ordered correctly WRT the skb check above.
5343 memcpy(&dpr->rx_std_buffers[di],
5344 &spr->rx_std_buffers[si],
5345 cpycnt * sizeof(struct ring_info));
5347 for (i = 0; i < cpycnt; i++, di++, si++) {
5348 struct tg3_rx_buffer_desc *sbd, *dbd;
5349 sbd = &spr->rx_std[si];
5350 dbd = &dpr->rx_std[di];
5351 dbd->addr_hi = sbd->addr_hi;
5352 dbd->addr_lo = sbd->addr_lo;
5355 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
5356 tp->rx_std_ring_mask;
5357 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
5358 tp->rx_std_ring_mask;
5362 src_prod_idx = spr->rx_jmb_prod_idx;
5364 /* Make sure updates to the rx_jmb_buffers[] entries and
5365 * the jumbo producer index are seen in the correct order.
5369 if (spr->rx_jmb_cons_idx == src_prod_idx)
5372 if (spr->rx_jmb_cons_idx < src_prod_idx)
5373 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
5375 cpycnt = tp->rx_jmb_ring_mask + 1 -
5376 spr->rx_jmb_cons_idx;
5378 cpycnt = min(cpycnt,
5379 tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
5381 si = spr->rx_jmb_cons_idx;
5382 di = dpr->rx_jmb_prod_idx;
5384 for (i = di; i < di + cpycnt; i++) {
5385 if (dpr->rx_jmb_buffers[i].skb) {
5395 /* Ensure that updates to the rx_jmb_buffers ring and the
5396 * shadowed hardware producer ring from tg3_recycle_skb() are
5397 * ordered correctly WRT the skb check above.
5401 memcpy(&dpr->rx_jmb_buffers[di],
5402 &spr->rx_jmb_buffers[si],
5403 cpycnt * sizeof(struct ring_info));
5405 for (i = 0; i < cpycnt; i++, di++, si++) {
5406 struct tg3_rx_buffer_desc *sbd, *dbd;
5407 sbd = &spr->rx_jmb[si].std;
5408 dbd = &dpr->rx_jmb[di].std;
5409 dbd->addr_hi = sbd->addr_hi;
5410 dbd->addr_lo = sbd->addr_lo;
5413 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
5414 tp->rx_jmb_ring_mask;
5415 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
5416 tp->rx_jmb_ring_mask;
5422 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
5424 struct tg3 *tp = tnapi->tp;
5426 /* run TX completion thread */
5427 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
5429 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5433 /* run RX thread, within the bounds set by NAPI.
5434 * All RX "locking" is done by ensuring outside
5435 * code synchronizes with tg3->napi.poll()
5437 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
5438 work_done += tg3_rx(tnapi, budget - work_done);
5440 if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
5441 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
5443 u32 std_prod_idx = dpr->rx_std_prod_idx;
5444 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
5446 for (i = 1; i < tp->irq_cnt; i++)
5447 err |= tg3_rx_prodring_xfer(tp, dpr,
5448 &tp->napi[i].prodring);
5452 if (std_prod_idx != dpr->rx_std_prod_idx)
5453 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5454 dpr->rx_std_prod_idx);
5456 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
5457 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5458 dpr->rx_jmb_prod_idx);
5463 tw32_f(HOSTCC_MODE, tp->coal_now);
5469 static int tg3_poll_msix(struct napi_struct *napi, int budget)
5471 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
5472 struct tg3 *tp = tnapi->tp;
5474 struct tg3_hw_status *sblk = tnapi->hw_status;
5477 work_done = tg3_poll_work(tnapi, work_done, budget);
5479 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5482 if (unlikely(work_done >= budget))
5485 /* tp->last_tag is used in tg3_int_reenable() below
5486 * to tell the hw how much work has been processed,
5487 * so we must read it before checking for more work.
5489 tnapi->last_tag = sblk->status_tag;
5490 tnapi->last_irq_tag = tnapi->last_tag;
5493 /* check for RX/TX work to do */
5494 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
5495 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
5496 napi_complete(napi);
5497 /* Reenable interrupts. */
5498 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
5507 /* work_done is guaranteed to be less than budget. */
5508 napi_complete(napi);
5509 schedule_work(&tp->reset_task);
5513 static void tg3_process_error(struct tg3 *tp)
5516 bool real_error = false;
5518 if (tg3_flag(tp, ERROR_PROCESSED))
5521 /* Check Flow Attention register */
5522 val = tr32(HOSTCC_FLOW_ATTN);
5523 if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
5524 netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
5528 if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
5529 netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
5533 if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
5534 netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
5543 tg3_flag_set(tp, ERROR_PROCESSED);
5544 schedule_work(&tp->reset_task);
5547 static int tg3_poll(struct napi_struct *napi, int budget)
5549 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
5550 struct tg3 *tp = tnapi->tp;
5552 struct tg3_hw_status *sblk = tnapi->hw_status;
5555 if (sblk->status & SD_STATUS_ERROR)
5556 tg3_process_error(tp);
5560 work_done = tg3_poll_work(tnapi, work_done, budget);
5562 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5565 if (unlikely(work_done >= budget))
5568 if (tg3_flag(tp, TAGGED_STATUS)) {
5569 /* tp->last_tag is used in tg3_int_reenable() below
5570 * to tell the hw how much work has been processed,
5571 * so we must read it before checking for more work.
5573 tnapi->last_tag = sblk->status_tag;
5574 tnapi->last_irq_tag = tnapi->last_tag;
5577 sblk->status &= ~SD_STATUS_UPDATED;
5579 if (likely(!tg3_has_work(tnapi))) {
5580 napi_complete(napi);
5581 tg3_int_reenable(tnapi);
5589 /* work_done is guaranteed to be less than budget. */
5590 napi_complete(napi);
5591 schedule_work(&tp->reset_task);
5595 static void tg3_napi_disable(struct tg3 *tp)
5599 for (i = tp->irq_cnt - 1; i >= 0; i--)
5600 napi_disable(&tp->napi[i].napi);
5603 static void tg3_napi_enable(struct tg3 *tp)
5607 for (i = 0; i < tp->irq_cnt; i++)
5608 napi_enable(&tp->napi[i].napi);
5611 static void tg3_napi_init(struct tg3 *tp)
5615 netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
5616 for (i = 1; i < tp->irq_cnt; i++)
5617 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
5620 static void tg3_napi_fini(struct tg3 *tp)
5624 for (i = 0; i < tp->irq_cnt; i++)
5625 netif_napi_del(&tp->napi[i].napi);
5628 static inline void tg3_netif_stop(struct tg3 *tp)
5630 tp->dev->trans_start = jiffies; /* prevent tx timeout */
5631 tg3_napi_disable(tp);
5632 netif_tx_disable(tp->dev);
5635 static inline void tg3_netif_start(struct tg3 *tp)
5637 /* NOTE: unconditional netif_tx_wake_all_queues is only
5638 * appropriate so long as all callers are assured to
5639 * have free tx slots (such as after tg3_init_hw)
5641 netif_tx_wake_all_queues(tp->dev);
5643 tg3_napi_enable(tp);
5644 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
5645 tg3_enable_ints(tp);
5648 static void tg3_irq_quiesce(struct tg3 *tp)
5652 BUG_ON(tp->irq_sync);
5657 for (i = 0; i < tp->irq_cnt; i++)
5658 synchronize_irq(tp->napi[i].irq_vec);
5661 /* Fully shutdown all tg3 driver activity elsewhere in the system.
5662 * If irq_sync is non-zero, then the IRQ handler must be synchronized
5663 * with as well. Most of the time, this is not necessary except when
5664 * shutting down the device.
5666 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
5668 spin_lock_bh(&tp->lock);
5670 tg3_irq_quiesce(tp);
5673 static inline void tg3_full_unlock(struct tg3 *tp)
5675 spin_unlock_bh(&tp->lock);
5678 /* One-shot MSI handler - Chip automatically disables interrupt
5679 * after sending MSI so driver doesn't have to do it.
5681 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
5683 struct tg3_napi *tnapi = dev_id;
5684 struct tg3 *tp = tnapi->tp;
5686 prefetch(tnapi->hw_status);
5688 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5690 if (likely(!tg3_irq_sync(tp)))
5691 napi_schedule(&tnapi->napi);
5696 /* MSI ISR - No need to check for interrupt sharing and no need to
5697 * flush status block and interrupt mailbox. PCI ordering rules
5698 * guarantee that MSI will arrive after the status block.
5700 static irqreturn_t tg3_msi(int irq, void *dev_id)
5702 struct tg3_napi *tnapi = dev_id;
5703 struct tg3 *tp = tnapi->tp;
5705 prefetch(tnapi->hw_status);
5707 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5709 * Writing any value to intr-mbox-0 clears PCI INTA# and
5710 * chip-internal interrupt pending events.
5711 * Writing non-zero to intr-mbox-0 additional tells the
5712 * NIC to stop sending us irqs, engaging "in-intr-handler"
5715 tw32_mailbox(tnapi->int_mbox, 0x00000001);
5716 if (likely(!tg3_irq_sync(tp)))
5717 napi_schedule(&tnapi->napi);
5719 return IRQ_RETVAL(1);
5722 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
5724 struct tg3_napi *tnapi = dev_id;
5725 struct tg3 *tp = tnapi->tp;
5726 struct tg3_hw_status *sblk = tnapi->hw_status;
5727 unsigned int handled = 1;
5729 /* In INTx mode, it is possible for the interrupt to arrive at
5730 * the CPU before the status block posted prior to the interrupt.
5731 * Reading the PCI State register will confirm whether the
5732 * interrupt is ours and will flush the status block.
5734 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
5735 if (tg3_flag(tp, CHIP_RESETTING) ||
5736 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5743 * Writing any value to intr-mbox-0 clears PCI INTA# and
5744 * chip-internal interrupt pending events.
5745 * Writing non-zero to intr-mbox-0 additional tells the
5746 * NIC to stop sending us irqs, engaging "in-intr-handler"
5749 * Flush the mailbox to de-assert the IRQ immediately to prevent
5750 * spurious interrupts. The flush impacts performance but
5751 * excessive spurious interrupts can be worse in some cases.
5753 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5754 if (tg3_irq_sync(tp))
5756 sblk->status &= ~SD_STATUS_UPDATED;
5757 if (likely(tg3_has_work(tnapi))) {
5758 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5759 napi_schedule(&tnapi->napi);
5761 /* No work, shared interrupt perhaps? re-enable
5762 * interrupts, and flush that PCI write
5764 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
5768 return IRQ_RETVAL(handled);
5771 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
5773 struct tg3_napi *tnapi = dev_id;
5774 struct tg3 *tp = tnapi->tp;
5775 struct tg3_hw_status *sblk = tnapi->hw_status;
5776 unsigned int handled = 1;
5778 /* In INTx mode, it is possible for the interrupt to arrive at
5779 * the CPU before the status block posted prior to the interrupt.
5780 * Reading the PCI State register will confirm whether the
5781 * interrupt is ours and will flush the status block.
5783 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
5784 if (tg3_flag(tp, CHIP_RESETTING) ||
5785 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5792 * writing any value to intr-mbox-0 clears PCI INTA# and
5793 * chip-internal interrupt pending events.
5794 * writing non-zero to intr-mbox-0 additional tells the
5795 * NIC to stop sending us irqs, engaging "in-intr-handler"
5798 * Flush the mailbox to de-assert the IRQ immediately to prevent
5799 * spurious interrupts. The flush impacts performance but
5800 * excessive spurious interrupts can be worse in some cases.
5802 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5805 * In a shared interrupt configuration, sometimes other devices'
5806 * interrupts will scream. We record the current status tag here
5807 * so that the above check can report that the screaming interrupts
5808 * are unhandled. Eventually they will be silenced.
5810 tnapi->last_irq_tag = sblk->status_tag;
5812 if (tg3_irq_sync(tp))
5815 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5817 napi_schedule(&tnapi->napi);
5820 return IRQ_RETVAL(handled);
5823 /* ISR for interrupt test */
5824 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
5826 struct tg3_napi *tnapi = dev_id;
5827 struct tg3 *tp = tnapi->tp;
5828 struct tg3_hw_status *sblk = tnapi->hw_status;
5830 if ((sblk->status & SD_STATUS_UPDATED) ||
5831 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5832 tg3_disable_ints(tp);
5833 return IRQ_RETVAL(1);
5835 return IRQ_RETVAL(0);
5838 static int tg3_init_hw(struct tg3 *, int);
5839 static int tg3_halt(struct tg3 *, int, int);
5841 /* Restart hardware after configuration changes, self-test, etc.
5842 * Invoked with tp->lock held.
5844 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
5845 __releases(tp->lock)
5846 __acquires(tp->lock)
5850 err = tg3_init_hw(tp, reset_phy);
5853 "Failed to re-initialize device, aborting\n");
5854 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
5855 tg3_full_unlock(tp);
5856 del_timer_sync(&tp->timer);
5858 tg3_napi_enable(tp);
5860 tg3_full_lock(tp, 0);
5865 #ifdef CONFIG_NET_POLL_CONTROLLER
5866 static void tg3_poll_controller(struct net_device *dev)
5869 struct tg3 *tp = netdev_priv(dev);
5871 for (i = 0; i < tp->irq_cnt; i++)
5872 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
5876 static void tg3_reset_task(struct work_struct *work)
5878 struct tg3 *tp = container_of(work, struct tg3, reset_task);
5880 unsigned int restart_timer;
5882 tg3_full_lock(tp, 0);
5884 if (!netif_running(tp->dev)) {
5885 tg3_full_unlock(tp);
5889 tg3_full_unlock(tp);
5895 tg3_full_lock(tp, 1);
5897 restart_timer = tg3_flag(tp, RESTART_TIMER);
5898 tg3_flag_clear(tp, RESTART_TIMER);
5900 if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
5901 tp->write32_tx_mbox = tg3_write32_tx_mbox;
5902 tp->write32_rx_mbox = tg3_write_flush_reg32;
5903 tg3_flag_set(tp, MBOX_WRITE_REORDER);
5904 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
5907 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
5908 err = tg3_init_hw(tp, 1);
5912 tg3_netif_start(tp);
5915 mod_timer(&tp->timer, jiffies + 1);
5918 tg3_full_unlock(tp);
5924 static void tg3_tx_timeout(struct net_device *dev)
5926 struct tg3 *tp = netdev_priv(dev);
5928 if (netif_msg_tx_err(tp)) {
5929 netdev_err(dev, "transmit timed out, resetting\n");
5933 schedule_work(&tp->reset_task);
5936 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
5937 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
5939 u32 base = (u32) mapping & 0xffffffff;
5941 return (base > 0xffffdcc0) && (base + len + 8 < base);
5944 /* Test for DMA addresses > 40-bit */
5945 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
5948 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
5949 if (tg3_flag(tp, 40BIT_DMA_BUG))
5950 return ((u64) mapping + len) > DMA_BIT_MASK(40);
5957 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
5958 dma_addr_t mapping, u32 len, u32 flags,
5961 txbd->addr_hi = ((u64) mapping >> 32);
5962 txbd->addr_lo = ((u64) mapping & 0xffffffff);
5963 txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
5964 txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
5967 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
5968 dma_addr_t map, u32 len, u32 flags,
5971 struct tg3 *tp = tnapi->tp;
5974 if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
5977 if (tg3_4g_overflow_test(map, len))
5980 if (tg3_40bit_overflow_test(tp, map, len))
5983 if (tg3_flag(tp, 4K_FIFO_LIMIT)) {
5984 u32 tmp_flag = flags & ~TXD_FLAG_END;
5985 while (len > TG3_TX_BD_DMA_MAX) {
5986 u32 frag_len = TG3_TX_BD_DMA_MAX;
5987 len -= TG3_TX_BD_DMA_MAX;
5990 tnapi->tx_buffers[*entry].fragmented = true;
5991 /* Avoid the 8byte DMA problem */
5993 len += TG3_TX_BD_DMA_MAX / 2;
5994 frag_len = TG3_TX_BD_DMA_MAX / 2;
6000 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6001 frag_len, tmp_flag, mss, vlan);
6003 *entry = NEXT_TX(*entry);
6014 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6015 len, flags, mss, vlan);
6017 *entry = NEXT_TX(*entry);
6023 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6024 len, flags, mss, vlan);
6025 *entry = NEXT_TX(*entry);
6031 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
6034 struct sk_buff *skb;
6035 struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
6040 pci_unmap_single(tnapi->tp->pdev,
6041 dma_unmap_addr(txb, mapping),
6045 while (txb->fragmented) {
6046 txb->fragmented = false;
6047 entry = NEXT_TX(entry);
6048 txb = &tnapi->tx_buffers[entry];
6051 for (i = 0; i < last; i++) {
6052 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6054 entry = NEXT_TX(entry);
6055 txb = &tnapi->tx_buffers[entry];
6057 pci_unmap_page(tnapi->tp->pdev,
6058 dma_unmap_addr(txb, mapping),
6059 frag->size, PCI_DMA_TODEVICE);
6061 while (txb->fragmented) {
6062 txb->fragmented = false;
6063 entry = NEXT_TX(entry);
6064 txb = &tnapi->tx_buffers[entry];
6069 /* Workaround 4GB and 40-bit hardware DMA bugs. */
6070 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
6071 struct sk_buff *skb,
6072 u32 *entry, u32 *budget,
6073 u32 base_flags, u32 mss, u32 vlan)
6075 struct tg3 *tp = tnapi->tp;
6076 struct sk_buff *new_skb;
6077 dma_addr_t new_addr = 0;
6080 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
6081 new_skb = skb_copy(skb, GFP_ATOMIC);
6083 int more_headroom = 4 - ((unsigned long)skb->data & 3);
6085 new_skb = skb_copy_expand(skb,
6086 skb_headroom(skb) + more_headroom,
6087 skb_tailroom(skb), GFP_ATOMIC);
6093 /* New SKB is guaranteed to be linear. */
6094 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
6096 /* Make sure the mapping succeeded */
6097 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
6098 dev_kfree_skb(new_skb);
6101 base_flags |= TXD_FLAG_END;
6103 tnapi->tx_buffers[*entry].skb = new_skb;
6104 dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
6107 if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
6108 new_skb->len, base_flags,
6110 tg3_tx_skb_unmap(tnapi, *entry, 0);
6111 dev_kfree_skb(new_skb);
6122 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
6124 /* Use GSO to workaround a rare TSO bug that may be triggered when the
6125 * TSO header is greater than 80 bytes.
6127 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
6129 struct sk_buff *segs, *nskb;
6130 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
6132 /* Estimate the number of fragments in the worst case */
6133 if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
6134 netif_stop_queue(tp->dev);
6136 /* netif_tx_stop_queue() must be done before checking
6137 * checking tx index in tg3_tx_avail() below, because in
6138 * tg3_tx(), we update tx index before checking for
6139 * netif_tx_queue_stopped().
6142 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
6143 return NETDEV_TX_BUSY;
6145 netif_wake_queue(tp->dev);
6148 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
6150 goto tg3_tso_bug_end;
6156 tg3_start_xmit(nskb, tp->dev);
6162 return NETDEV_TX_OK;
6165 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
6166 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
6168 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
6170 struct tg3 *tp = netdev_priv(dev);
6171 u32 len, entry, base_flags, mss, vlan = 0;
6173 int i = -1, would_hit_hwbug;
6175 struct tg3_napi *tnapi;
6176 struct netdev_queue *txq;
6179 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
6180 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
6181 if (tg3_flag(tp, ENABLE_TSS))
6184 budget = tg3_tx_avail(tnapi);
6186 /* We are running in BH disabled context with netif_tx_lock
6187 * and TX reclaim runs via tp->napi.poll inside of a software
6188 * interrupt. Furthermore, IRQ processing runs lockless so we have
6189 * no IRQ context deadlocks to worry about either. Rejoice!
6191 if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
6192 if (!netif_tx_queue_stopped(txq)) {
6193 netif_tx_stop_queue(txq);
6195 /* This is a hard error, log it. */
6197 "BUG! Tx Ring full when queue awake!\n");
6199 return NETDEV_TX_BUSY;
6202 entry = tnapi->tx_prod;
6204 if (skb->ip_summed == CHECKSUM_PARTIAL)
6205 base_flags |= TXD_FLAG_TCPUDP_CSUM;
6207 mss = skb_shinfo(skb)->gso_size;
6210 u32 tcp_opt_len, hdr_len;
6212 if (skb_header_cloned(skb) &&
6213 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
6219 tcp_opt_len = tcp_optlen(skb);
6221 if (skb_is_gso_v6(skb)) {
6222 hdr_len = skb_headlen(skb) - ETH_HLEN;
6226 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
6227 hdr_len = ip_tcp_len + tcp_opt_len;
6230 iph->tot_len = htons(mss + hdr_len);
6233 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
6234 tg3_flag(tp, TSO_BUG))
6235 return tg3_tso_bug(tp, skb);
6237 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
6238 TXD_FLAG_CPU_POST_DMA);
6240 if (tg3_flag(tp, HW_TSO_1) ||
6241 tg3_flag(tp, HW_TSO_2) ||
6242 tg3_flag(tp, HW_TSO_3)) {
6243 tcp_hdr(skb)->check = 0;
6244 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
6246 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
6251 if (tg3_flag(tp, HW_TSO_3)) {
6252 mss |= (hdr_len & 0xc) << 12;
6254 base_flags |= 0x00000010;
6255 base_flags |= (hdr_len & 0x3e0) << 5;
6256 } else if (tg3_flag(tp, HW_TSO_2))
6257 mss |= hdr_len << 9;
6258 else if (tg3_flag(tp, HW_TSO_1) ||
6259 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6260 if (tcp_opt_len || iph->ihl > 5) {
6263 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
6264 mss |= (tsflags << 11);
6267 if (tcp_opt_len || iph->ihl > 5) {
6270 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
6271 base_flags |= tsflags << 12;
6276 #ifdef BCM_KERNEL_SUPPORTS_8021Q
6277 if (vlan_tx_tag_present(skb)) {
6278 base_flags |= TXD_FLAG_VLAN;
6279 vlan = vlan_tx_tag_get(skb);
6283 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
6284 !mss && skb->len > VLAN_ETH_FRAME_LEN)
6285 base_flags |= TXD_FLAG_JMB_PKT;
6287 len = skb_headlen(skb);
6289 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
6290 if (pci_dma_mapping_error(tp->pdev, mapping)) {
6295 tnapi->tx_buffers[entry].skb = skb;
6296 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
6298 would_hit_hwbug = 0;
6300 if (tg3_flag(tp, 5701_DMA_BUG))
6301 would_hit_hwbug = 1;
6303 if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
6304 ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
6306 would_hit_hwbug = 1;
6308 /* Now loop through additional data fragments, and queue them. */
6309 if (skb_shinfo(skb)->nr_frags > 0) {
6312 if (!tg3_flag(tp, HW_TSO_1) &&
6313 !tg3_flag(tp, HW_TSO_2) &&
6314 !tg3_flag(tp, HW_TSO_3))
6317 last = skb_shinfo(skb)->nr_frags - 1;
6318 for (i = 0; i <= last; i++) {
6319 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6322 mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
6323 len, PCI_DMA_TODEVICE);
6325 tnapi->tx_buffers[entry].skb = NULL;
6326 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
6328 if (pci_dma_mapping_error(tp->pdev, mapping))
6331 if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
6333 ((i == last) ? TXD_FLAG_END : 0),
6335 would_hit_hwbug = 1;
6339 if (would_hit_hwbug) {
6340 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
6342 /* If the workaround fails due to memory/mapping
6343 * failure, silently drop this packet.
6345 entry = tnapi->tx_prod;
6346 budget = tg3_tx_avail(tnapi);
6347 if (tigon3_dma_hwbug_workaround(tnapi, skb, &entry, &budget,
6348 base_flags, mss, vlan))
6352 skb_tx_timestamp(skb);
6354 /* Packets are ready, update Tx producer idx local and on card. */
6355 tw32_tx_mbox(tnapi->prodmbox, entry);
6357 tnapi->tx_prod = entry;
6358 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
6359 netif_tx_stop_queue(txq);
6361 /* netif_tx_stop_queue() must be done before checking
6362 * checking tx index in tg3_tx_avail() below, because in
6363 * tg3_tx(), we update tx index before checking for
6364 * netif_tx_queue_stopped().
6367 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
6368 netif_tx_wake_queue(txq);
6374 return NETDEV_TX_OK;
6377 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
6379 tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
6380 return NETDEV_TX_OK;
6383 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
6386 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
6387 MAC_MODE_PORT_MODE_MASK);
6389 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
6391 if (!tg3_flag(tp, 5705_PLUS))
6392 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
6394 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
6395 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
6397 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
6399 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
6401 if (tg3_flag(tp, 5705_PLUS) ||
6402 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
6403 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
6404 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
6407 tw32(MAC_MODE, tp->mac_mode);
6411 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
6413 u32 val, bmcr, mac_mode, ptest = 0;
6415 tg3_phy_toggle_apd(tp, false);
6416 tg3_phy_toggle_automdix(tp, 0);
6418 if (extlpbk && tg3_phy_set_extloopbk(tp))
6421 bmcr = BMCR_FULLDPLX;
6426 bmcr |= BMCR_SPEED100;
6430 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
6432 bmcr |= BMCR_SPEED100;
6435 bmcr |= BMCR_SPEED1000;
6440 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
6441 tg3_readphy(tp, MII_CTRL1000, &val);
6442 val |= CTL1000_AS_MASTER |
6443 CTL1000_ENABLE_MASTER;
6444 tg3_writephy(tp, MII_CTRL1000, val);
6446 ptest = MII_TG3_FET_PTEST_TRIM_SEL |
6447 MII_TG3_FET_PTEST_TRIM_2;
6448 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
6451 bmcr |= BMCR_LOOPBACK;
6453 tg3_writephy(tp, MII_BMCR, bmcr);
6455 /* The write needs to be flushed for the FETs */
6456 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
6457 tg3_readphy(tp, MII_BMCR, &bmcr);
6461 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
6462 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
6463 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
6464 MII_TG3_FET_PTEST_FRC_TX_LINK |
6465 MII_TG3_FET_PTEST_FRC_TX_LOCK);
6467 /* The write needs to be flushed for the AC131 */
6468 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
6471 /* Reset to prevent losing 1st rx packet intermittently */
6472 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
6473 tg3_flag(tp, 5780_CLASS)) {
6474 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6476 tw32_f(MAC_RX_MODE, tp->rx_mode);
6479 mac_mode = tp->mac_mode &
6480 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
6481 if (speed == SPEED_1000)
6482 mac_mode |= MAC_MODE_PORT_MODE_GMII;
6484 mac_mode |= MAC_MODE_PORT_MODE_MII;
6486 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
6487 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
6489 if (masked_phy_id == TG3_PHY_ID_BCM5401)
6490 mac_mode &= ~MAC_MODE_LINK_POLARITY;
6491 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
6492 mac_mode |= MAC_MODE_LINK_POLARITY;
6494 tg3_writephy(tp, MII_TG3_EXT_CTRL,
6495 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
6498 tw32(MAC_MODE, mac_mode);
6504 static void tg3_set_loopback(struct net_device *dev, u32 features)
6506 struct tg3 *tp = netdev_priv(dev);
6508 if (features & NETIF_F_LOOPBACK) {
6509 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
6512 spin_lock_bh(&tp->lock);
6513 tg3_mac_loopback(tp, true);
6514 netif_carrier_on(tp->dev);
6515 spin_unlock_bh(&tp->lock);
6516 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
6518 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
6521 spin_lock_bh(&tp->lock);
6522 tg3_mac_loopback(tp, false);
6523 /* Force link status check */
6524 tg3_setup_phy(tp, 1);
6525 spin_unlock_bh(&tp->lock);
6526 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
6530 static u32 tg3_fix_features(struct net_device *dev, u32 features)
6532 struct tg3 *tp = netdev_priv(dev);
6534 if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
6535 features &= ~NETIF_F_ALL_TSO;
6540 static int tg3_set_features(struct net_device *dev, u32 features)
6542 u32 changed = dev->features ^ features;
6544 if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
6545 tg3_set_loopback(dev, features);
6550 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
6555 if (new_mtu > ETH_DATA_LEN) {
6556 if (tg3_flag(tp, 5780_CLASS)) {
6557 netdev_update_features(dev);
6558 tg3_flag_clear(tp, TSO_CAPABLE);
6560 tg3_flag_set(tp, JUMBO_RING_ENABLE);
6563 if (tg3_flag(tp, 5780_CLASS)) {
6564 tg3_flag_set(tp, TSO_CAPABLE);
6565 netdev_update_features(dev);
6567 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
6571 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
6573 struct tg3 *tp = netdev_priv(dev);
6576 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
6579 if (!netif_running(dev)) {
6580 /* We'll just catch it later when the
6583 tg3_set_mtu(dev, tp, new_mtu);
6591 tg3_full_lock(tp, 1);
6593 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6595 tg3_set_mtu(dev, tp, new_mtu);
6597 err = tg3_restart_hw(tp, 0);
6600 tg3_netif_start(tp);
6602 tg3_full_unlock(tp);
6610 static void tg3_rx_prodring_free(struct tg3 *tp,
6611 struct tg3_rx_prodring_set *tpr)
6615 if (tpr != &tp->napi[0].prodring) {
6616 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
6617 i = (i + 1) & tp->rx_std_ring_mask)
6618 tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
6621 if (tg3_flag(tp, JUMBO_CAPABLE)) {
6622 for (i = tpr->rx_jmb_cons_idx;
6623 i != tpr->rx_jmb_prod_idx;
6624 i = (i + 1) & tp->rx_jmb_ring_mask) {
6625 tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
6633 for (i = 0; i <= tp->rx_std_ring_mask; i++)
6634 tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
6637 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
6638 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
6639 tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
6644 /* Initialize rx rings for packet processing.
6646 * The chip has been shut down and the driver detached from
6647 * the networking, so no interrupts or new tx packets will
6648 * end up in the driver. tp->{tx,}lock are held and thus
6651 static int tg3_rx_prodring_alloc(struct tg3 *tp,
6652 struct tg3_rx_prodring_set *tpr)
6654 u32 i, rx_pkt_dma_sz;
6656 tpr->rx_std_cons_idx = 0;
6657 tpr->rx_std_prod_idx = 0;
6658 tpr->rx_jmb_cons_idx = 0;
6659 tpr->rx_jmb_prod_idx = 0;
6661 if (tpr != &tp->napi[0].prodring) {
6662 memset(&tpr->rx_std_buffers[0], 0,
6663 TG3_RX_STD_BUFF_RING_SIZE(tp));
6664 if (tpr->rx_jmb_buffers)
6665 memset(&tpr->rx_jmb_buffers[0], 0,
6666 TG3_RX_JMB_BUFF_RING_SIZE(tp));
6670 /* Zero out all descriptors. */
6671 memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
6673 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
6674 if (tg3_flag(tp, 5780_CLASS) &&
6675 tp->dev->mtu > ETH_DATA_LEN)
6676 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
6677 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
6679 /* Initialize invariants of the rings, we only set this
6680 * stuff once. This works because the card does not
6681 * write into the rx buffer posting rings.
6683 for (i = 0; i <= tp->rx_std_ring_mask; i++) {
6684 struct tg3_rx_buffer_desc *rxd;
6686 rxd = &tpr->rx_std[i];
6687 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
6688 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
6689 rxd->opaque = (RXD_OPAQUE_RING_STD |
6690 (i << RXD_OPAQUE_INDEX_SHIFT));
6693 /* Now allocate fresh SKBs for each rx ring. */
6694 for (i = 0; i < tp->rx_pending; i++) {
6695 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
6696 netdev_warn(tp->dev,
6697 "Using a smaller RX standard ring. Only "
6698 "%d out of %d buffers were allocated "
6699 "successfully\n", i, tp->rx_pending);
6707 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
6710 memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
6712 if (!tg3_flag(tp, JUMBO_RING_ENABLE))
6715 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
6716 struct tg3_rx_buffer_desc *rxd;
6718 rxd = &tpr->rx_jmb[i].std;
6719 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
6720 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
6722 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
6723 (i << RXD_OPAQUE_INDEX_SHIFT));
6726 for (i = 0; i < tp->rx_jumbo_pending; i++) {
6727 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
6728 netdev_warn(tp->dev,
6729 "Using a smaller RX jumbo ring. Only %d "
6730 "out of %d buffers were allocated "
6731 "successfully\n", i, tp->rx_jumbo_pending);
6734 tp->rx_jumbo_pending = i;
6743 tg3_rx_prodring_free(tp, tpr);
6747 static void tg3_rx_prodring_fini(struct tg3 *tp,
6748 struct tg3_rx_prodring_set *tpr)
6750 kfree(tpr->rx_std_buffers);
6751 tpr->rx_std_buffers = NULL;
6752 kfree(tpr->rx_jmb_buffers);
6753 tpr->rx_jmb_buffers = NULL;
6755 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
6756 tpr->rx_std, tpr->rx_std_mapping);
6760 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
6761 tpr->rx_jmb, tpr->rx_jmb_mapping);
6766 static int tg3_rx_prodring_init(struct tg3 *tp,
6767 struct tg3_rx_prodring_set *tpr)
6769 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
6771 if (!tpr->rx_std_buffers)
6774 tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
6775 TG3_RX_STD_RING_BYTES(tp),
6776 &tpr->rx_std_mapping,
6781 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
6782 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
6784 if (!tpr->rx_jmb_buffers)
6787 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
6788 TG3_RX_JMB_RING_BYTES(tp),
6789 &tpr->rx_jmb_mapping,
6798 tg3_rx_prodring_fini(tp, tpr);
6802 /* Free up pending packets in all rx/tx rings.
6804 * The chip has been shut down and the driver detached from
6805 * the networking, so no interrupts or new tx packets will
6806 * end up in the driver. tp->{tx,}lock is not held and we are not
6807 * in an interrupt context and thus may sleep.
6809 static void tg3_free_rings(struct tg3 *tp)
6813 for (j = 0; j < tp->irq_cnt; j++) {
6814 struct tg3_napi *tnapi = &tp->napi[j];
6816 tg3_rx_prodring_free(tp, &tnapi->prodring);
6818 if (!tnapi->tx_buffers)
6821 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
6822 struct sk_buff *skb = tnapi->tx_buffers[i].skb;
6827 tg3_tx_skb_unmap(tnapi, i, skb_shinfo(skb)->nr_frags);
6829 dev_kfree_skb_any(skb);
6834 /* Initialize tx/rx rings for packet processing.
6836 * The chip has been shut down and the driver detached from
6837 * the networking, so no interrupts or new tx packets will
6838 * end up in the driver. tp->{tx,}lock are held and thus
6841 static int tg3_init_rings(struct tg3 *tp)
6845 /* Free up all the SKBs. */
6848 for (i = 0; i < tp->irq_cnt; i++) {
6849 struct tg3_napi *tnapi = &tp->napi[i];
6851 tnapi->last_tag = 0;
6852 tnapi->last_irq_tag = 0;
6853 tnapi->hw_status->status = 0;
6854 tnapi->hw_status->status_tag = 0;
6855 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6860 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
6862 tnapi->rx_rcb_ptr = 0;
6864 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
6866 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
6876 * Must not be invoked with interrupt sources disabled and
6877 * the hardware shutdown down.
6879 static void tg3_free_consistent(struct tg3 *tp)
6883 for (i = 0; i < tp->irq_cnt; i++) {
6884 struct tg3_napi *tnapi = &tp->napi[i];
6886 if (tnapi->tx_ring) {
6887 dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
6888 tnapi->tx_ring, tnapi->tx_desc_mapping);
6889 tnapi->tx_ring = NULL;
6892 kfree(tnapi->tx_buffers);
6893 tnapi->tx_buffers = NULL;
6895 if (tnapi->rx_rcb) {
6896 dma_free_coherent(&tp->pdev->dev,
6897 TG3_RX_RCB_RING_BYTES(tp),
6899 tnapi->rx_rcb_mapping);
6900 tnapi->rx_rcb = NULL;
6903 tg3_rx_prodring_fini(tp, &tnapi->prodring);
6905 if (tnapi->hw_status) {
6906 dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
6908 tnapi->status_mapping);
6909 tnapi->hw_status = NULL;
6914 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
6915 tp->hw_stats, tp->stats_mapping);
6916 tp->hw_stats = NULL;
6921 * Must not be invoked with interrupt sources disabled and
6922 * the hardware shutdown down. Can sleep.
6924 static int tg3_alloc_consistent(struct tg3 *tp)
6928 tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
6929 sizeof(struct tg3_hw_stats),
6935 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
6937 for (i = 0; i < tp->irq_cnt; i++) {
6938 struct tg3_napi *tnapi = &tp->napi[i];
6939 struct tg3_hw_status *sblk;
6941 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
6943 &tnapi->status_mapping,
6945 if (!tnapi->hw_status)
6948 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6949 sblk = tnapi->hw_status;
6951 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
6954 /* If multivector TSS is enabled, vector 0 does not handle
6955 * tx interrupts. Don't allocate any resources for it.
6957 if ((!i && !tg3_flag(tp, ENABLE_TSS)) ||
6958 (i && tg3_flag(tp, ENABLE_TSS))) {
6959 tnapi->tx_buffers = kzalloc(
6960 sizeof(struct tg3_tx_ring_info) *
6961 TG3_TX_RING_SIZE, GFP_KERNEL);
6962 if (!tnapi->tx_buffers)
6965 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
6967 &tnapi->tx_desc_mapping,
6969 if (!tnapi->tx_ring)
6974 * When RSS is enabled, the status block format changes
6975 * slightly. The "rx_jumbo_consumer", "reserved",
6976 * and "rx_mini_consumer" members get mapped to the
6977 * other three rx return ring producer indexes.
6981 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
6984 tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
6987 tnapi->rx_rcb_prod_idx = &sblk->reserved;
6990 tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
6995 * If multivector RSS is enabled, vector 0 does not handle
6996 * rx or tx interrupts. Don't allocate any resources for it.
6998 if (!i && tg3_flag(tp, ENABLE_RSS))
7001 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
7002 TG3_RX_RCB_RING_BYTES(tp),
7003 &tnapi->rx_rcb_mapping,
7008 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7014 tg3_free_consistent(tp);
7018 #define MAX_WAIT_CNT 1000
7020 /* To stop a block, clear the enable bit and poll till it
7021 * clears. tp->lock is held.
7023 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
7028 if (tg3_flag(tp, 5705_PLUS)) {
7035 /* We can't enable/disable these bits of the
7036 * 5705/5750, just say success.
7049 for (i = 0; i < MAX_WAIT_CNT; i++) {
7052 if ((val & enable_bit) == 0)
7056 if (i == MAX_WAIT_CNT && !silent) {
7057 dev_err(&tp->pdev->dev,
7058 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
7066 /* tp->lock is held. */
7067 static int tg3_abort_hw(struct tg3 *tp, int silent)
7071 tg3_disable_ints(tp);
7073 tp->rx_mode &= ~RX_MODE_ENABLE;
7074 tw32_f(MAC_RX_MODE, tp->rx_mode);
7077 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
7078 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
7079 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
7080 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
7081 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
7082 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
7084 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
7085 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
7086 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
7087 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
7088 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
7089 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
7090 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
7092 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
7093 tw32_f(MAC_MODE, tp->mac_mode);
7096 tp->tx_mode &= ~TX_MODE_ENABLE;
7097 tw32_f(MAC_TX_MODE, tp->tx_mode);
7099 for (i = 0; i < MAX_WAIT_CNT; i++) {
7101 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
7104 if (i >= MAX_WAIT_CNT) {
7105 dev_err(&tp->pdev->dev,
7106 "%s timed out, TX_MODE_ENABLE will not clear "
7107 "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
7111 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
7112 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
7113 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
7115 tw32(FTQ_RESET, 0xffffffff);
7116 tw32(FTQ_RESET, 0x00000000);
7118 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
7119 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
7121 for (i = 0; i < tp->irq_cnt; i++) {
7122 struct tg3_napi *tnapi = &tp->napi[i];
7123 if (tnapi->hw_status)
7124 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7127 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
7132 static void tg3_ape_send_event(struct tg3 *tp, u32 event)
7137 /* NCSI does not support APE events */
7138 if (tg3_flag(tp, APE_HAS_NCSI))
7141 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
7142 if (apedata != APE_SEG_SIG_MAGIC)
7145 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
7146 if (!(apedata & APE_FW_STATUS_READY))
7149 /* Wait for up to 1 millisecond for APE to service previous event. */
7150 for (i = 0; i < 10; i++) {
7151 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
7154 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
7156 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
7157 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
7158 event | APE_EVENT_STATUS_EVENT_PENDING);
7160 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
7162 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
7168 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
7169 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
7172 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
7177 if (!tg3_flag(tp, ENABLE_APE))
7181 case RESET_KIND_INIT:
7182 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
7183 APE_HOST_SEG_SIG_MAGIC);
7184 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
7185 APE_HOST_SEG_LEN_MAGIC);
7186 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
7187 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
7188 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
7189 APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
7190 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
7191 APE_HOST_BEHAV_NO_PHYLOCK);
7192 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
7193 TG3_APE_HOST_DRVR_STATE_START);
7195 event = APE_EVENT_STATUS_STATE_START;
7197 case RESET_KIND_SHUTDOWN:
7198 /* With the interface we are currently using,
7199 * APE does not track driver state. Wiping
7200 * out the HOST SEGMENT SIGNATURE forces
7201 * the APE to assume OS absent status.
7203 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
7205 if (device_may_wakeup(&tp->pdev->dev) &&
7206 tg3_flag(tp, WOL_ENABLE)) {
7207 tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
7208 TG3_APE_HOST_WOL_SPEED_AUTO);
7209 apedata = TG3_APE_HOST_DRVR_STATE_WOL;
7211 apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;
7213 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);
7215 event = APE_EVENT_STATUS_STATE_UNLOAD;
7217 case RESET_KIND_SUSPEND:
7218 event = APE_EVENT_STATUS_STATE_SUSPEND;
7224 event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
7226 tg3_ape_send_event(tp, event);
7229 /* tp->lock is held. */
7230 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
7232 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
7233 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
7235 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
7237 case RESET_KIND_INIT:
7238 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7242 case RESET_KIND_SHUTDOWN:
7243 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7247 case RESET_KIND_SUSPEND:
7248 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7257 if (kind == RESET_KIND_INIT ||
7258 kind == RESET_KIND_SUSPEND)
7259 tg3_ape_driver_state_change(tp, kind);
7262 /* tp->lock is held. */
7263 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
7265 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
7267 case RESET_KIND_INIT:
7268 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7269 DRV_STATE_START_DONE);
7272 case RESET_KIND_SHUTDOWN:
7273 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7274 DRV_STATE_UNLOAD_DONE);
7282 if (kind == RESET_KIND_SHUTDOWN)
7283 tg3_ape_driver_state_change(tp, kind);
7286 /* tp->lock is held. */
7287 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
7289 if (tg3_flag(tp, ENABLE_ASF)) {
7291 case RESET_KIND_INIT:
7292 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7296 case RESET_KIND_SHUTDOWN:
7297 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7301 case RESET_KIND_SUSPEND:
7302 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7312 static int tg3_poll_fw(struct tg3 *tp)
7317 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7318 /* Wait up to 20ms for init done. */
7319 for (i = 0; i < 200; i++) {
7320 if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
7327 /* Wait for firmware initialization to complete. */
7328 for (i = 0; i < 100000; i++) {
7329 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
7330 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
7335 /* Chip might not be fitted with firmware. Some Sun onboard
7336 * parts are configured like that. So don't signal the timeout
7337 * of the above loop as an error, but do report the lack of
7338 * running firmware once.
7340 if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
7341 tg3_flag_set(tp, NO_FWARE_REPORTED);
7343 netdev_info(tp->dev, "No firmware running\n");
7346 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
7347 /* The 57765 A0 needs a little more
7348 * time to do some important work.
7356 /* Save PCI command register before chip reset */
7357 static void tg3_save_pci_state(struct tg3 *tp)
7359 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
7362 /* Restore PCI state after chip reset */
7363 static void tg3_restore_pci_state(struct tg3 *tp)
7367 /* Re-enable indirect register accesses. */
7368 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7369 tp->misc_host_ctrl);
7371 /* Set MAX PCI retry to zero. */
7372 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
7373 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7374 tg3_flag(tp, PCIX_MODE))
7375 val |= PCISTATE_RETRY_SAME_DMA;
7376 /* Allow reads and writes to the APE register and memory space. */
7377 if (tg3_flag(tp, ENABLE_APE))
7378 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7379 PCISTATE_ALLOW_APE_SHMEM_WR |
7380 PCISTATE_ALLOW_APE_PSPACE_WR;
7381 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
7383 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
7385 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
7386 if (tg3_flag(tp, PCI_EXPRESS))
7387 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
7389 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
7390 tp->pci_cacheline_sz);
7391 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
7396 /* Make sure PCI-X relaxed ordering bit is clear. */
7397 if (tg3_flag(tp, PCIX_MODE)) {
7400 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7402 pcix_cmd &= ~PCI_X_CMD_ERO;
7403 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7407 if (tg3_flag(tp, 5780_CLASS)) {
7409 /* Chip reset on 5780 will reset MSI enable bit,
7410 * so need to restore it.
7412 if (tg3_flag(tp, USING_MSI)) {
7415 pci_read_config_word(tp->pdev,
7416 tp->msi_cap + PCI_MSI_FLAGS,
7418 pci_write_config_word(tp->pdev,
7419 tp->msi_cap + PCI_MSI_FLAGS,
7420 ctrl | PCI_MSI_FLAGS_ENABLE);
7421 val = tr32(MSGINT_MODE);
7422 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
7427 static void tg3_stop_fw(struct tg3 *);
7429 /* tp->lock is held. */
7430 static int tg3_chip_reset(struct tg3 *tp)
7433 void (*write_op)(struct tg3 *, u32, u32);
7438 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
7440 /* No matching tg3_nvram_unlock() after this because
7441 * chip reset below will undo the nvram lock.
7443 tp->nvram_lock_cnt = 0;
7445 /* GRC_MISC_CFG core clock reset will clear the memory
7446 * enable bit in PCI register 4 and the MSI enable bit
7447 * on some chips, so we save relevant registers here.
7449 tg3_save_pci_state(tp);
7451 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
7452 tg3_flag(tp, 5755_PLUS))
7453 tw32(GRC_FASTBOOT_PC, 0);
7456 * We must avoid the readl() that normally takes place.
7457 * It locks machines, causes machine checks, and other
7458 * fun things. So, temporarily disable the 5701
7459 * hardware workaround, while we do the reset.
7461 write_op = tp->write32;
7462 if (write_op == tg3_write_flush_reg32)
7463 tp->write32 = tg3_write32;
7465 /* Prevent the irq handler from reading or writing PCI registers
7466 * during chip reset when the memory enable bit in the PCI command
7467 * register may be cleared. The chip does not generate interrupt
7468 * at this time, but the irq handler may still be called due to irq
7469 * sharing or irqpoll.
7471 tg3_flag_set(tp, CHIP_RESETTING);
7472 for (i = 0; i < tp->irq_cnt; i++) {
7473 struct tg3_napi *tnapi = &tp->napi[i];
7474 if (tnapi->hw_status) {
7475 tnapi->hw_status->status = 0;
7476 tnapi->hw_status->status_tag = 0;
7478 tnapi->last_tag = 0;
7479 tnapi->last_irq_tag = 0;
7483 for (i = 0; i < tp->irq_cnt; i++)
7484 synchronize_irq(tp->napi[i].irq_vec);
7486 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7487 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
7488 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
7492 val = GRC_MISC_CFG_CORECLK_RESET;
7494 if (tg3_flag(tp, PCI_EXPRESS)) {
7495 /* Force PCIe 1.0a mode */
7496 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7497 !tg3_flag(tp, 57765_PLUS) &&
7498 tr32(TG3_PCIE_PHY_TSTCTL) ==
7499 (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
7500 tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
7502 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
7503 tw32(GRC_MISC_CFG, (1 << 29));
7508 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7509 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
7510 tw32(GRC_VCPU_EXT_CTRL,
7511 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
7514 /* Manage gphy power for all CPMU absent PCIe devices. */
7515 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
7516 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
7518 tw32(GRC_MISC_CFG, val);
7520 /* restore 5701 hardware bug workaround write method */
7521 tp->write32 = write_op;
7523 /* Unfortunately, we have to delay before the PCI read back.
7524 * Some 575X chips even will not respond to a PCI cfg access
7525 * when the reset command is given to the chip.
7527 * How do these hardware designers expect things to work
7528 * properly if the PCI write is posted for a long period
7529 * of time? It is always necessary to have some method by
7530 * which a register read back can occur to push the write
7531 * out which does the reset.
7533 * For most tg3 variants the trick below was working.
7538 /* Flush PCI posted writes. The normal MMIO registers
7539 * are inaccessible at this time so this is the only
7540 * way to make this reliably (actually, this is no longer
7541 * the case, see above). I tried to use indirect
7542 * register read/write but this upset some 5701 variants.
7544 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
7548 if (tg3_flag(tp, PCI_EXPRESS) && pci_pcie_cap(tp->pdev)) {
7551 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
7555 /* Wait for link training to complete. */
7556 for (i = 0; i < 5000; i++)
7559 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
7560 pci_write_config_dword(tp->pdev, 0xc4,
7561 cfg_val | (1 << 15));
7564 /* Clear the "no snoop" and "relaxed ordering" bits. */
7565 pci_read_config_word(tp->pdev,
7566 pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
7568 val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
7569 PCI_EXP_DEVCTL_NOSNOOP_EN);
7571 * Older PCIe devices only support the 128 byte
7572 * MPS setting. Enforce the restriction.
7574 if (!tg3_flag(tp, CPMU_PRESENT))
7575 val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
7576 pci_write_config_word(tp->pdev,
7577 pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
7580 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
7582 /* Clear error status */
7583 pci_write_config_word(tp->pdev,
7584 pci_pcie_cap(tp->pdev) + PCI_EXP_DEVSTA,
7585 PCI_EXP_DEVSTA_CED |
7586 PCI_EXP_DEVSTA_NFED |
7587 PCI_EXP_DEVSTA_FED |
7588 PCI_EXP_DEVSTA_URD);
7591 tg3_restore_pci_state(tp);
7593 tg3_flag_clear(tp, CHIP_RESETTING);
7594 tg3_flag_clear(tp, ERROR_PROCESSED);
7597 if (tg3_flag(tp, 5780_CLASS))
7598 val = tr32(MEMARB_MODE);
7599 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
7601 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
7603 tw32(0x5000, 0x400);
7606 tw32(GRC_MODE, tp->grc_mode);
7608 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
7611 tw32(0xc4, val | (1 << 15));
7614 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
7615 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7616 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
7617 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
7618 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
7619 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7622 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
7623 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
7625 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
7626 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
7631 tw32_f(MAC_MODE, val);
7634 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
7636 err = tg3_poll_fw(tp);
7642 if (tg3_flag(tp, PCI_EXPRESS) &&
7643 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
7644 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7645 !tg3_flag(tp, 57765_PLUS)) {
7648 tw32(0x7c00, val | (1 << 25));
7651 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
7652 val = tr32(TG3_CPMU_CLCK_ORIDE);
7653 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
7656 /* Reprobe ASF enable state. */
7657 tg3_flag_clear(tp, ENABLE_ASF);
7658 tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
7659 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
7660 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
7663 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
7664 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
7665 tg3_flag_set(tp, ENABLE_ASF);
7666 tp->last_event_jiffies = jiffies;
7667 if (tg3_flag(tp, 5750_PLUS))
7668 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
7675 /* tp->lock is held. */
7676 static void tg3_stop_fw(struct tg3 *tp)
7678 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
7679 /* Wait for RX cpu to ACK the previous event. */
7680 tg3_wait_for_event_ack(tp);
7682 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
7684 tg3_generate_fw_event(tp);
7686 /* Wait for RX cpu to ACK this event. */
7687 tg3_wait_for_event_ack(tp);
7691 /* tp->lock is held. */
7692 static int tg3_halt(struct tg3 *tp, int kind, int silent)
7698 tg3_write_sig_pre_reset(tp, kind);
7700 tg3_abort_hw(tp, silent);
7701 err = tg3_chip_reset(tp);
7703 __tg3_set_mac_addr(tp, 0);
7705 tg3_write_sig_legacy(tp, kind);
7706 tg3_write_sig_post_reset(tp, kind);
7714 #define RX_CPU_SCRATCH_BASE 0x30000
7715 #define RX_CPU_SCRATCH_SIZE 0x04000
7716 #define TX_CPU_SCRATCH_BASE 0x34000
7717 #define TX_CPU_SCRATCH_SIZE 0x04000
7719 /* tp->lock is held. */
7720 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
7724 BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
7726 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7727 u32 val = tr32(GRC_VCPU_EXT_CTRL);
7729 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
7732 if (offset == RX_CPU_BASE) {
7733 for (i = 0; i < 10000; i++) {
7734 tw32(offset + CPU_STATE, 0xffffffff);
7735 tw32(offset + CPU_MODE, CPU_MODE_HALT);
7736 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
7740 tw32(offset + CPU_STATE, 0xffffffff);
7741 tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
7744 for (i = 0; i < 10000; i++) {
7745 tw32(offset + CPU_STATE, 0xffffffff);
7746 tw32(offset + CPU_MODE, CPU_MODE_HALT);
7747 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
7753 netdev_err(tp->dev, "%s timed out, %s CPU\n",
7754 __func__, offset == RX_CPU_BASE ? "RX" : "TX");
7758 /* Clear firmware's nvram arbitration. */
7759 if (tg3_flag(tp, NVRAM))
7760 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
7765 unsigned int fw_base;
7766 unsigned int fw_len;
7767 const __be32 *fw_data;
7770 /* tp->lock is held. */
7771 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
7772 int cpu_scratch_size, struct fw_info *info)
7774 int err, lock_err, i;
7775 void (*write_op)(struct tg3 *, u32, u32);
7777 if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
7779 "%s: Trying to load TX cpu firmware which is 5705\n",
7784 if (tg3_flag(tp, 5705_PLUS))
7785 write_op = tg3_write_mem;
7787 write_op = tg3_write_indirect_reg32;
7789 /* It is possible that bootcode is still loading at this point.
7790 * Get the nvram lock first before halting the cpu.
7792 lock_err = tg3_nvram_lock(tp);
7793 err = tg3_halt_cpu(tp, cpu_base);
7795 tg3_nvram_unlock(tp);
7799 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
7800 write_op(tp, cpu_scratch_base + i, 0);
7801 tw32(cpu_base + CPU_STATE, 0xffffffff);
7802 tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
7803 for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
7804 write_op(tp, (cpu_scratch_base +
7805 (info->fw_base & 0xffff) +
7807 be32_to_cpu(info->fw_data[i]));
7815 /* tp->lock is held. */
7816 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
7818 struct fw_info info;
7819 const __be32 *fw_data;
7822 fw_data = (void *)tp->fw->data;
7824 /* Firmware blob starts with version numbers, followed by
7825 start address and length. We are setting complete length.
7826 length = end_address_of_bss - start_address_of_text.
7827 Remainder is the blob to be loaded contiguously
7828 from start address. */
7830 info.fw_base = be32_to_cpu(fw_data[1]);
7831 info.fw_len = tp->fw->size - 12;
7832 info.fw_data = &fw_data[3];
7834 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
7835 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
7840 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
7841 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
7846 /* Now startup only the RX cpu. */
7847 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7848 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
7850 for (i = 0; i < 5; i++) {
7851 if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
7853 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7854 tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
7855 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
7859 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
7860 "should be %08x\n", __func__,
7861 tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
7864 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7865 tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
7870 /* tp->lock is held. */
7871 static int tg3_load_tso_firmware(struct tg3 *tp)
7873 struct fw_info info;
7874 const __be32 *fw_data;
7875 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
7878 if (tg3_flag(tp, HW_TSO_1) ||
7879 tg3_flag(tp, HW_TSO_2) ||
7880 tg3_flag(tp, HW_TSO_3))
7883 fw_data = (void *)tp->fw->data;
7885 /* Firmware blob starts with version numbers, followed by
7886 start address and length. We are setting complete length.
7887 length = end_address_of_bss - start_address_of_text.
7888 Remainder is the blob to be loaded contiguously
7889 from start address. */
7891 info.fw_base = be32_to_cpu(fw_data[1]);
7892 cpu_scratch_size = tp->fw_len;
7893 info.fw_len = tp->fw->size - 12;
7894 info.fw_data = &fw_data[3];
7896 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7897 cpu_base = RX_CPU_BASE;
7898 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
7900 cpu_base = TX_CPU_BASE;
7901 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
7902 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
7905 err = tg3_load_firmware_cpu(tp, cpu_base,
7906 cpu_scratch_base, cpu_scratch_size,
7911 /* Now startup the cpu. */
7912 tw32(cpu_base + CPU_STATE, 0xffffffff);
7913 tw32_f(cpu_base + CPU_PC, info.fw_base);
7915 for (i = 0; i < 5; i++) {
7916 if (tr32(cpu_base + CPU_PC) == info.fw_base)
7918 tw32(cpu_base + CPU_STATE, 0xffffffff);
7919 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
7920 tw32_f(cpu_base + CPU_PC, info.fw_base);
7925 "%s fails to set CPU PC, is %08x should be %08x\n",
7926 __func__, tr32(cpu_base + CPU_PC), info.fw_base);
7929 tw32(cpu_base + CPU_STATE, 0xffffffff);
7930 tw32_f(cpu_base + CPU_MODE, 0x00000000);
7935 static int tg3_set_mac_addr(struct net_device *dev, void *p)
7937 struct tg3 *tp = netdev_priv(dev);
7938 struct sockaddr *addr = p;
7939 int err = 0, skip_mac_1 = 0;
7941 if (!is_valid_ether_addr(addr->sa_data))
7944 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7946 if (!netif_running(dev))
7949 if (tg3_flag(tp, ENABLE_ASF)) {
7950 u32 addr0_high, addr0_low, addr1_high, addr1_low;
7952 addr0_high = tr32(MAC_ADDR_0_HIGH);
7953 addr0_low = tr32(MAC_ADDR_0_LOW);
7954 addr1_high = tr32(MAC_ADDR_1_HIGH);
7955 addr1_low = tr32(MAC_ADDR_1_LOW);
7957 /* Skip MAC addr 1 if ASF is using it. */
7958 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
7959 !(addr1_high == 0 && addr1_low == 0))
7962 spin_lock_bh(&tp->lock);
7963 __tg3_set_mac_addr(tp, skip_mac_1);
7964 spin_unlock_bh(&tp->lock);
7969 /* tp->lock is held. */
7970 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
7971 dma_addr_t mapping, u32 maxlen_flags,
7975 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
7976 ((u64) mapping >> 32));
7978 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
7979 ((u64) mapping & 0xffffffff));
7981 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
7984 if (!tg3_flag(tp, 5705_PLUS))
7986 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
7990 static void __tg3_set_rx_mode(struct net_device *);
7991 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
7995 if (!tg3_flag(tp, ENABLE_TSS)) {
7996 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
7997 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
7998 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
8000 tw32(HOSTCC_TXCOL_TICKS, 0);
8001 tw32(HOSTCC_TXMAX_FRAMES, 0);
8002 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
8005 if (!tg3_flag(tp, ENABLE_RSS)) {
8006 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
8007 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
8008 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
8010 tw32(HOSTCC_RXCOL_TICKS, 0);
8011 tw32(HOSTCC_RXMAX_FRAMES, 0);
8012 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
8015 if (!tg3_flag(tp, 5705_PLUS)) {
8016 u32 val = ec->stats_block_coalesce_usecs;
8018 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
8019 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
8021 if (!netif_carrier_ok(tp->dev))
8024 tw32(HOSTCC_STAT_COAL_TICKS, val);
8027 for (i = 0; i < tp->irq_cnt - 1; i++) {
8030 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
8031 tw32(reg, ec->rx_coalesce_usecs);
8032 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
8033 tw32(reg, ec->rx_max_coalesced_frames);
8034 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
8035 tw32(reg, ec->rx_max_coalesced_frames_irq);
8037 if (tg3_flag(tp, ENABLE_TSS)) {
8038 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
8039 tw32(reg, ec->tx_coalesce_usecs);
8040 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
8041 tw32(reg, ec->tx_max_coalesced_frames);
8042 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
8043 tw32(reg, ec->tx_max_coalesced_frames_irq);
8047 for (; i < tp->irq_max - 1; i++) {
8048 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
8049 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
8050 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8052 if (tg3_flag(tp, ENABLE_TSS)) {
8053 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
8054 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
8055 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8060 /* tp->lock is held. */
8061 static void tg3_rings_reset(struct tg3 *tp)
8064 u32 stblk, txrcb, rxrcb, limit;
8065 struct tg3_napi *tnapi = &tp->napi[0];
8067 /* Disable all transmit rings but the first. */
8068 if (!tg3_flag(tp, 5705_PLUS))
8069 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
8070 else if (tg3_flag(tp, 5717_PLUS))
8071 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
8072 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8073 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
8075 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8077 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8078 txrcb < limit; txrcb += TG3_BDINFO_SIZE)
8079 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
8080 BDINFO_FLAGS_DISABLED);
8083 /* Disable all receive return rings but the first. */
8084 if (tg3_flag(tp, 5717_PLUS))
8085 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
8086 else if (!tg3_flag(tp, 5705_PLUS))
8087 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
8088 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8089 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8090 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
8092 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8094 for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8095 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
8096 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
8097 BDINFO_FLAGS_DISABLED);
8099 /* Disable interrupts */
8100 tw32_mailbox_f(tp->napi[0].int_mbox, 1);
8101 tp->napi[0].chk_msi_cnt = 0;
8102 tp->napi[0].last_rx_cons = 0;
8103 tp->napi[0].last_tx_cons = 0;
8105 /* Zero mailbox registers. */
8106 if (tg3_flag(tp, SUPPORT_MSIX)) {
8107 for (i = 1; i < tp->irq_max; i++) {
8108 tp->napi[i].tx_prod = 0;
8109 tp->napi[i].tx_cons = 0;
8110 if (tg3_flag(tp, ENABLE_TSS))
8111 tw32_mailbox(tp->napi[i].prodmbox, 0);
8112 tw32_rx_mbox(tp->napi[i].consmbox, 0);
8113 tw32_mailbox_f(tp->napi[i].int_mbox, 1);
8114 tp->napi[i].chk_msi_cnt = 0;
8115 tp->napi[i].last_rx_cons = 0;
8116 tp->napi[i].last_tx_cons = 0;
8118 if (!tg3_flag(tp, ENABLE_TSS))
8119 tw32_mailbox(tp->napi[0].prodmbox, 0);
8121 tp->napi[0].tx_prod = 0;
8122 tp->napi[0].tx_cons = 0;
8123 tw32_mailbox(tp->napi[0].prodmbox, 0);
8124 tw32_rx_mbox(tp->napi[0].consmbox, 0);
8127 /* Make sure the NIC-based send BD rings are disabled. */
8128 if (!tg3_flag(tp, 5705_PLUS)) {
8129 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
8130 for (i = 0; i < 16; i++)
8131 tw32_tx_mbox(mbox + i * 8, 0);
8134 txrcb = NIC_SRAM_SEND_RCB;
8135 rxrcb = NIC_SRAM_RCV_RET_RCB;
8137 /* Clear status block in ram. */
8138 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8140 /* Set status block DMA address */
8141 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8142 ((u64) tnapi->status_mapping >> 32));
8143 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8144 ((u64) tnapi->status_mapping & 0xffffffff));
8146 if (tnapi->tx_ring) {
8147 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8148 (TG3_TX_RING_SIZE <<
8149 BDINFO_FLAGS_MAXLEN_SHIFT),
8150 NIC_SRAM_TX_BUFFER_DESC);
8151 txrcb += TG3_BDINFO_SIZE;
8154 if (tnapi->rx_rcb) {
8155 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8156 (tp->rx_ret_ring_mask + 1) <<
8157 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
8158 rxrcb += TG3_BDINFO_SIZE;
8161 stblk = HOSTCC_STATBLCK_RING1;
8163 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
8164 u64 mapping = (u64)tnapi->status_mapping;
8165 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
8166 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
8168 /* Clear status block in ram. */
8169 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8171 if (tnapi->tx_ring) {
8172 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8173 (TG3_TX_RING_SIZE <<
8174 BDINFO_FLAGS_MAXLEN_SHIFT),
8175 NIC_SRAM_TX_BUFFER_DESC);
8176 txrcb += TG3_BDINFO_SIZE;
8179 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8180 ((tp->rx_ret_ring_mask + 1) <<
8181 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
8184 rxrcb += TG3_BDINFO_SIZE;
8188 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
8190 u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
8192 if (!tg3_flag(tp, 5750_PLUS) ||
8193 tg3_flag(tp, 5780_CLASS) ||
8194 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
8195 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8196 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
8197 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8198 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8199 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
8201 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
8203 nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
8204 host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
8206 val = min(nic_rep_thresh, host_rep_thresh);
8207 tw32(RCVBDI_STD_THRESH, val);
8209 if (tg3_flag(tp, 57765_PLUS))
8210 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
8212 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8215 if (!tg3_flag(tp, 5705_PLUS))
8216 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
8218 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5717;
8220 host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
8222 val = min(bdcache_maxcnt / 2, host_rep_thresh);
8223 tw32(RCVBDI_JUMBO_THRESH, val);
8225 if (tg3_flag(tp, 57765_PLUS))
8226 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
8229 /* tp->lock is held. */
8230 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8232 u32 val, rdmac_mode;
8234 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
8236 tg3_disable_ints(tp);
8240 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
8242 if (tg3_flag(tp, INIT_COMPLETE))
8243 tg3_abort_hw(tp, 1);
8245 /* Enable MAC control of LPI */
8246 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
8247 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
8248 TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
8249 TG3_CPMU_EEE_LNKIDL_UART_IDL);
8251 tw32_f(TG3_CPMU_EEE_CTRL,
8252 TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
8254 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
8255 TG3_CPMU_EEEMD_LPI_IN_TX |
8256 TG3_CPMU_EEEMD_LPI_IN_RX |
8257 TG3_CPMU_EEEMD_EEE_ENABLE;
8259 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8260 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
8262 if (tg3_flag(tp, ENABLE_APE))
8263 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
8265 tw32_f(TG3_CPMU_EEE_MODE, val);
8267 tw32_f(TG3_CPMU_EEE_DBTMR1,
8268 TG3_CPMU_DBTMR1_PCIEXIT_2047US |
8269 TG3_CPMU_DBTMR1_LNKIDLE_2047US);
8271 tw32_f(TG3_CPMU_EEE_DBTMR2,
8272 TG3_CPMU_DBTMR2_APE_TX_2047US |
8273 TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
8279 err = tg3_chip_reset(tp);
8283 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
8285 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
8286 val = tr32(TG3_CPMU_CTRL);
8287 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
8288 tw32(TG3_CPMU_CTRL, val);
8290 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8291 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8292 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8293 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8295 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
8296 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
8297 val |= CPMU_LNK_AWARE_MACCLK_6_25;
8298 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
8300 val = tr32(TG3_CPMU_HST_ACC);
8301 val &= ~CPMU_HST_ACC_MACCLK_MASK;
8302 val |= CPMU_HST_ACC_MACCLK_6_25;
8303 tw32(TG3_CPMU_HST_ACC, val);
8306 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
8307 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
8308 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
8309 PCIE_PWR_MGMT_L1_THRESH_4MS;
8310 tw32(PCIE_PWR_MGMT_THRESH, val);
8312 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
8313 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
8315 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
8317 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8318 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8321 if (tg3_flag(tp, L1PLLPD_EN)) {
8322 u32 grc_mode = tr32(GRC_MODE);
8324 /* Access the lower 1K of PL PCIE block registers. */
8325 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8326 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8328 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
8329 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
8330 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
8332 tw32(GRC_MODE, grc_mode);
8335 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
8336 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
8337 u32 grc_mode = tr32(GRC_MODE);
8339 /* Access the lower 1K of PL PCIE block registers. */
8340 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8341 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8343 val = tr32(TG3_PCIE_TLDLPL_PORT +
8344 TG3_PCIE_PL_LO_PHYCTL5);
8345 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
8346 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
8348 tw32(GRC_MODE, grc_mode);
8351 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
8352 u32 grc_mode = tr32(GRC_MODE);
8354 /* Access the lower 1K of DL PCIE block registers. */
8355 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8356 tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
8358 val = tr32(TG3_PCIE_TLDLPL_PORT +
8359 TG3_PCIE_DL_LO_FTSMAX);
8360 val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
8361 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
8362 val | TG3_PCIE_DL_LO_FTSMAX_VAL);
8364 tw32(GRC_MODE, grc_mode);
8367 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8368 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8369 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8370 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8373 /* This works around an issue with Athlon chipsets on
8374 * B3 tigon3 silicon. This bit has no effect on any
8375 * other revision. But do not set this on PCI Express
8376 * chips and don't even touch the clocks if the CPMU is present.
8378 if (!tg3_flag(tp, CPMU_PRESENT)) {
8379 if (!tg3_flag(tp, PCI_EXPRESS))
8380 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
8381 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8384 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
8385 tg3_flag(tp, PCIX_MODE)) {
8386 val = tr32(TG3PCI_PCISTATE);
8387 val |= PCISTATE_RETRY_SAME_DMA;
8388 tw32(TG3PCI_PCISTATE, val);
8391 if (tg3_flag(tp, ENABLE_APE)) {
8392 /* Allow reads and writes to the
8393 * APE register and memory space.
8395 val = tr32(TG3PCI_PCISTATE);
8396 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8397 PCISTATE_ALLOW_APE_SHMEM_WR |
8398 PCISTATE_ALLOW_APE_PSPACE_WR;
8399 tw32(TG3PCI_PCISTATE, val);
8402 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
8403 /* Enable some hw fixes. */
8404 val = tr32(TG3PCI_MSI_DATA);
8405 val |= (1 << 26) | (1 << 28) | (1 << 29);
8406 tw32(TG3PCI_MSI_DATA, val);
8409 /* Descriptor ring init may make accesses to the
8410 * NIC SRAM area to setup the TX descriptors, so we
8411 * can only do this after the hardware has been
8412 * successfully reset.
8414 err = tg3_init_rings(tp);
8418 if (tg3_flag(tp, 57765_PLUS)) {
8419 val = tr32(TG3PCI_DMA_RW_CTRL) &
8420 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
8421 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
8422 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
8423 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765 &&
8424 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8425 val |= DMA_RWCTRL_TAGGED_STAT_WA;
8426 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
8427 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
8428 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
8429 /* This value is determined during the probe time DMA
8430 * engine test, tg3_test_dma.
8432 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8435 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
8436 GRC_MODE_4X_NIC_SEND_RINGS |
8437 GRC_MODE_NO_TX_PHDR_CSUM |
8438 GRC_MODE_NO_RX_PHDR_CSUM);
8439 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
8441 /* Pseudo-header checksum is done by hardware logic and not
8442 * the offload processers, so make the chip do the pseudo-
8443 * header checksums on receive. For transmit it is more
8444 * convenient to do the pseudo-header checksum in software
8445 * as Linux does that on transmit for us in all cases.
8447 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
8451 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
8453 /* Setup the timer prescalar register. Clock is always 66Mhz. */
8454 val = tr32(GRC_MISC_CFG);
8456 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
8457 tw32(GRC_MISC_CFG, val);
8459 /* Initialize MBUF/DESC pool. */
8460 if (tg3_flag(tp, 5750_PLUS)) {
8462 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
8463 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
8464 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8465 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
8467 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
8468 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
8469 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
8470 } else if (tg3_flag(tp, TSO_CAPABLE)) {
8473 fw_len = tp->fw_len;
8474 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
8475 tw32(BUFMGR_MB_POOL_ADDR,
8476 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
8477 tw32(BUFMGR_MB_POOL_SIZE,
8478 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
8481 if (tp->dev->mtu <= ETH_DATA_LEN) {
8482 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8483 tp->bufmgr_config.mbuf_read_dma_low_water);
8484 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8485 tp->bufmgr_config.mbuf_mac_rx_low_water);
8486 tw32(BUFMGR_MB_HIGH_WATER,
8487 tp->bufmgr_config.mbuf_high_water);
8489 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8490 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
8491 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8492 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
8493 tw32(BUFMGR_MB_HIGH_WATER,
8494 tp->bufmgr_config.mbuf_high_water_jumbo);
8496 tw32(BUFMGR_DMA_LOW_WATER,
8497 tp->bufmgr_config.dma_low_water);
8498 tw32(BUFMGR_DMA_HIGH_WATER,
8499 tp->bufmgr_config.dma_high_water);
8501 val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
8502 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
8503 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
8504 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8505 tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8506 tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
8507 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
8508 tw32(BUFMGR_MODE, val);
8509 for (i = 0; i < 2000; i++) {
8510 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
8515 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
8519 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
8520 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
8522 tg3_setup_rxbd_thresholds(tp);
8524 /* Initialize TG3_BDINFO's at:
8525 * RCVDBDI_STD_BD: standard eth size rx ring
8526 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
8527 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
8530 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
8531 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
8532 * ring attribute flags
8533 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
8535 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
8536 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
8538 * The size of each ring is fixed in the firmware, but the location is
8541 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8542 ((u64) tpr->rx_std_mapping >> 32));
8543 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8544 ((u64) tpr->rx_std_mapping & 0xffffffff));
8545 if (!tg3_flag(tp, 5717_PLUS))
8546 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
8547 NIC_SRAM_RX_BUFFER_DESC);
8549 /* Disable the mini ring */
8550 if (!tg3_flag(tp, 5705_PLUS))
8551 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
8552 BDINFO_FLAGS_DISABLED);
8554 /* Program the jumbo buffer descriptor ring control
8555 * blocks on those devices that have them.
8557 if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8558 (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
8560 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
8561 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8562 ((u64) tpr->rx_jmb_mapping >> 32));
8563 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8564 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
8565 val = TG3_RX_JMB_RING_SIZE(tp) <<
8566 BDINFO_FLAGS_MAXLEN_SHIFT;
8567 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8568 val | BDINFO_FLAGS_USE_EXT_RECV);
8569 if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
8570 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8571 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
8572 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
8574 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8575 BDINFO_FLAGS_DISABLED);
8578 if (tg3_flag(tp, 57765_PLUS)) {
8579 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8580 val = TG3_RX_STD_MAX_SIZE_5700;
8582 val = TG3_RX_STD_MAX_SIZE_5717;
8583 val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
8584 val |= (TG3_RX_STD_DMA_SZ << 2);
8586 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
8588 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
8590 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
8592 tpr->rx_std_prod_idx = tp->rx_pending;
8593 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
8595 tpr->rx_jmb_prod_idx =
8596 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
8597 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
8599 tg3_rings_reset(tp);
8601 /* Initialize MAC address and backoff seed. */
8602 __tg3_set_mac_addr(tp, 0);
8604 /* MTU + ethernet header + FCS + optional VLAN tag */
8605 tw32(MAC_RX_MTU_SIZE,
8606 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
8608 /* The slot time is changed by tg3_setup_phy if we
8609 * run at gigabit with half duplex.
8611 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
8612 (6 << TX_LENGTHS_IPG_SHIFT) |
8613 (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
8615 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8616 val |= tr32(MAC_TX_LENGTHS) &
8617 (TX_LENGTHS_JMB_FRM_LEN_MSK |
8618 TX_LENGTHS_CNT_DWN_VAL_MSK);
8620 tw32(MAC_TX_LENGTHS, val);
8622 /* Receive rules. */
8623 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
8624 tw32(RCVLPC_CONFIG, 0x0181);
8626 /* Calculate RDMAC_MODE setting early, we need it to determine
8627 * the RCVLPC_STATE_ENABLE mask.
8629 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
8630 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
8631 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
8632 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
8633 RDMAC_MODE_LNGREAD_ENAB);
8635 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
8636 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
8638 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8639 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8640 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8641 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
8642 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
8643 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
8645 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8646 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8647 if (tg3_flag(tp, TSO_CAPABLE) &&
8648 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
8649 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
8650 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8651 !tg3_flag(tp, IS_5788)) {
8652 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8656 if (tg3_flag(tp, PCI_EXPRESS))
8657 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8659 if (tg3_flag(tp, HW_TSO_1) ||
8660 tg3_flag(tp, HW_TSO_2) ||
8661 tg3_flag(tp, HW_TSO_3))
8662 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
8664 if (tg3_flag(tp, 57765_PLUS) ||
8665 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8666 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8667 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
8669 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8670 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
8672 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
8673 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8674 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8675 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
8676 tg3_flag(tp, 57765_PLUS)) {
8677 val = tr32(TG3_RDMA_RSRVCTRL_REG);
8678 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8679 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8680 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
8681 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
8682 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
8683 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
8684 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
8685 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
8687 tw32(TG3_RDMA_RSRVCTRL_REG,
8688 val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
8691 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8692 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8693 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
8694 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
8695 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
8696 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
8699 /* Receive/send statistics. */
8700 if (tg3_flag(tp, 5750_PLUS)) {
8701 val = tr32(RCVLPC_STATS_ENABLE);
8702 val &= ~RCVLPC_STATSENAB_DACK_FIX;
8703 tw32(RCVLPC_STATS_ENABLE, val);
8704 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
8705 tg3_flag(tp, TSO_CAPABLE)) {
8706 val = tr32(RCVLPC_STATS_ENABLE);
8707 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
8708 tw32(RCVLPC_STATS_ENABLE, val);
8710 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
8712 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
8713 tw32(SNDDATAI_STATSENAB, 0xffffff);
8714 tw32(SNDDATAI_STATSCTRL,
8715 (SNDDATAI_SCTRL_ENABLE |
8716 SNDDATAI_SCTRL_FASTUPD));
8718 /* Setup host coalescing engine. */
8719 tw32(HOSTCC_MODE, 0);
8720 for (i = 0; i < 2000; i++) {
8721 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
8726 __tg3_set_coalesce(tp, &tp->coal);
8728 if (!tg3_flag(tp, 5705_PLUS)) {
8729 /* Status/statistics block address. See tg3_timer,
8730 * the tg3_periodic_fetch_stats call there, and
8731 * tg3_get_stats to see how this works for 5705/5750 chips.
8733 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8734 ((u64) tp->stats_mapping >> 32));
8735 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8736 ((u64) tp->stats_mapping & 0xffffffff));
8737 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
8739 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
8741 /* Clear statistics and status block memory areas */
8742 for (i = NIC_SRAM_STATS_BLK;
8743 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
8745 tg3_write_mem(tp, i, 0);
8750 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
8752 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
8753 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
8754 if (!tg3_flag(tp, 5705_PLUS))
8755 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
8757 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8758 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
8759 /* reset to prevent losing 1st rx packet intermittently */
8760 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8764 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
8765 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
8766 MAC_MODE_FHDE_ENABLE;
8767 if (tg3_flag(tp, ENABLE_APE))
8768 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
8769 if (!tg3_flag(tp, 5705_PLUS) &&
8770 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
8771 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
8772 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8773 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
8776 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
8777 * If TG3_FLAG_IS_NIC is zero, we should read the
8778 * register to preserve the GPIO settings for LOMs. The GPIOs,
8779 * whether used as inputs or outputs, are set by boot code after
8782 if (!tg3_flag(tp, IS_NIC)) {
8785 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
8786 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
8787 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
8789 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8790 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
8791 GRC_LCLCTRL_GPIO_OUTPUT3;
8793 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
8794 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
8796 tp->grc_local_ctrl &= ~gpio_mask;
8797 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
8799 /* GPIO1 must be driven high for eeprom write protect */
8800 if (tg3_flag(tp, EEPROM_WRITE_PROT))
8801 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
8802 GRC_LCLCTRL_GPIO_OUTPUT1);
8804 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8807 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1) {
8808 val = tr32(MSGINT_MODE);
8809 val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE;
8810 if (!tg3_flag(tp, 1SHOT_MSI))
8811 val |= MSGINT_MODE_ONE_SHOT_DISABLE;
8812 tw32(MSGINT_MODE, val);
8815 if (!tg3_flag(tp, 5705_PLUS)) {
8816 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
8820 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
8821 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
8822 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
8823 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
8824 WDMAC_MODE_LNGREAD_ENAB);
8826 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8827 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8828 if (tg3_flag(tp, TSO_CAPABLE) &&
8829 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
8830 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
8832 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8833 !tg3_flag(tp, IS_5788)) {
8834 val |= WDMAC_MODE_RX_ACCEL;
8838 /* Enable host coalescing bug fix */
8839 if (tg3_flag(tp, 5755_PLUS))
8840 val |= WDMAC_MODE_STATUS_TAG_FIX;
8842 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
8843 val |= WDMAC_MODE_BURST_ALL_DATA;
8845 tw32_f(WDMAC_MODE, val);
8848 if (tg3_flag(tp, PCIX_MODE)) {
8851 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8853 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
8854 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
8855 pcix_cmd |= PCI_X_CMD_READ_2K;
8856 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
8857 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
8858 pcix_cmd |= PCI_X_CMD_READ_2K;
8860 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8864 tw32_f(RDMAC_MODE, rdmac_mode);
8867 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
8868 if (!tg3_flag(tp, 5705_PLUS))
8869 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
8871 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
8873 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
8875 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
8877 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
8878 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
8879 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
8880 if (tg3_flag(tp, LRG_PROD_RING_CAP))
8881 val |= RCVDBDI_MODE_LRG_RING_SZ;
8882 tw32(RCVDBDI_MODE, val);
8883 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
8884 if (tg3_flag(tp, HW_TSO_1) ||
8885 tg3_flag(tp, HW_TSO_2) ||
8886 tg3_flag(tp, HW_TSO_3))
8887 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
8888 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
8889 if (tg3_flag(tp, ENABLE_TSS))
8890 val |= SNDBDI_MODE_MULTI_TXQ_EN;
8891 tw32(SNDBDI_MODE, val);
8892 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
8894 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
8895 err = tg3_load_5701_a0_firmware_fix(tp);
8900 if (tg3_flag(tp, TSO_CAPABLE)) {
8901 err = tg3_load_tso_firmware(tp);
8906 tp->tx_mode = TX_MODE_ENABLE;
8908 if (tg3_flag(tp, 5755_PLUS) ||
8909 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
8910 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
8912 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8913 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
8914 tp->tx_mode &= ~val;
8915 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
8918 tw32_f(MAC_TX_MODE, tp->tx_mode);
8921 if (tg3_flag(tp, ENABLE_RSS)) {
8923 u32 reg = MAC_RSS_INDIR_TBL_0;
8925 if (tp->irq_cnt == 2) {
8926 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i += 8) {
8933 while (i < TG3_RSS_INDIR_TBL_SIZE) {
8934 val = i % (tp->irq_cnt - 1);
8936 for (; i % 8; i++) {
8938 val |= (i % (tp->irq_cnt - 1));
8945 /* Setup the "secret" hash key. */
8946 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
8947 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
8948 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
8949 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
8950 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
8951 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
8952 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
8953 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
8954 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
8955 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
8958 tp->rx_mode = RX_MODE_ENABLE;
8959 if (tg3_flag(tp, 5755_PLUS))
8960 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
8962 if (tg3_flag(tp, ENABLE_RSS))
8963 tp->rx_mode |= RX_MODE_RSS_ENABLE |
8964 RX_MODE_RSS_ITBL_HASH_BITS_7 |
8965 RX_MODE_RSS_IPV6_HASH_EN |
8966 RX_MODE_RSS_TCP_IPV6_HASH_EN |
8967 RX_MODE_RSS_IPV4_HASH_EN |
8968 RX_MODE_RSS_TCP_IPV4_HASH_EN;
8970 tw32_f(MAC_RX_MODE, tp->rx_mode);
8973 tw32(MAC_LED_CTRL, tp->led_ctrl);
8975 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
8976 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8977 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8980 tw32_f(MAC_RX_MODE, tp->rx_mode);
8983 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8984 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
8985 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
8986 /* Set drive transmission level to 1.2V */
8987 /* only if the signal pre-emphasis bit is not set */
8988 val = tr32(MAC_SERDES_CFG);
8991 tw32(MAC_SERDES_CFG, val);
8993 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
8994 tw32(MAC_SERDES_CFG, 0x616000);
8997 /* Prevent chip from dropping frames when flow control
9000 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
9004 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
9006 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
9007 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
9008 /* Use hardware link auto-negotiation */
9009 tg3_flag_set(tp, HW_AUTONEG);
9012 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9013 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
9016 tmp = tr32(SERDES_RX_CTRL);
9017 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
9018 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
9019 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
9020 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9023 if (!tg3_flag(tp, USE_PHYLIB)) {
9024 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
9025 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
9026 tp->link_config.speed = tp->link_config.orig_speed;
9027 tp->link_config.duplex = tp->link_config.orig_duplex;
9028 tp->link_config.autoneg = tp->link_config.orig_autoneg;
9031 err = tg3_setup_phy(tp, 0);
9035 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9036 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
9039 /* Clear CRC stats. */
9040 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
9041 tg3_writephy(tp, MII_TG3_TEST1,
9042 tmp | MII_TG3_TEST1_CRC_EN);
9043 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
9048 __tg3_set_rx_mode(tp->dev);
9050 /* Initialize receive rules. */
9051 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
9052 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
9053 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
9054 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
9056 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
9060 if (tg3_flag(tp, ENABLE_ASF))
9064 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
9066 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
9068 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
9070 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
9072 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
9074 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
9076 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
9078 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
9080 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
9082 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
9084 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
9086 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
9088 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
9090 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
9098 if (tg3_flag(tp, ENABLE_APE))
9099 /* Write our heartbeat update interval to APE. */
9100 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
9101 APE_HOST_HEARTBEAT_INT_DISABLE);
9103 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
9108 /* Called at device open time to get the chip ready for
9109 * packet processing. Invoked with tp->lock held.
9111 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
9113 tg3_switch_clocks(tp);
9115 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
9117 return tg3_reset_hw(tp, reset_phy);
9120 #define TG3_STAT_ADD32(PSTAT, REG) \
9121 do { u32 __val = tr32(REG); \
9122 (PSTAT)->low += __val; \
9123 if ((PSTAT)->low < __val) \
9124 (PSTAT)->high += 1; \
9127 static void tg3_periodic_fetch_stats(struct tg3 *tp)
9129 struct tg3_hw_stats *sp = tp->hw_stats;
9131 if (!netif_carrier_ok(tp->dev))
9134 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
9135 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
9136 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
9137 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
9138 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
9139 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
9140 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
9141 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
9142 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
9143 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
9144 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
9145 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
9146 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
9148 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
9149 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
9150 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
9151 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
9152 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
9153 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
9154 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
9155 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
9156 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
9157 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
9158 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
9159 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
9160 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
9161 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
9163 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
9164 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9165 tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
9166 tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
9167 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
9169 u32 val = tr32(HOSTCC_FLOW_ATTN);
9170 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
9172 tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
9173 sp->rx_discards.low += val;
9174 if (sp->rx_discards.low < val)
9175 sp->rx_discards.high += 1;
9177 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
9179 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
9182 static void tg3_chk_missed_msi(struct tg3 *tp)
9186 for (i = 0; i < tp->irq_cnt; i++) {
9187 struct tg3_napi *tnapi = &tp->napi[i];
9189 if (tg3_has_work(tnapi)) {
9190 if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
9191 tnapi->last_tx_cons == tnapi->tx_cons) {
9192 if (tnapi->chk_msi_cnt < 1) {
9193 tnapi->chk_msi_cnt++;
9199 tnapi->chk_msi_cnt = 0;
9200 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
9201 tnapi->last_tx_cons = tnapi->tx_cons;
9205 static void tg3_timer(unsigned long __opaque)
9207 struct tg3 *tp = (struct tg3 *) __opaque;
9212 spin_lock(&tp->lock);
9214 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
9215 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
9216 tg3_chk_missed_msi(tp);
9218 if (!tg3_flag(tp, TAGGED_STATUS)) {
9219 /* All of this garbage is because when using non-tagged
9220 * IRQ status the mailbox/status_block protocol the chip
9221 * uses with the cpu is race prone.
9223 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
9224 tw32(GRC_LOCAL_CTRL,
9225 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
9227 tw32(HOSTCC_MODE, tp->coalesce_mode |
9228 HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
9231 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
9232 tg3_flag_set(tp, RESTART_TIMER);
9233 spin_unlock(&tp->lock);
9234 schedule_work(&tp->reset_task);
9239 /* This part only runs once per second. */
9240 if (!--tp->timer_counter) {
9241 if (tg3_flag(tp, 5705_PLUS))
9242 tg3_periodic_fetch_stats(tp);
9244 if (tp->setlpicnt && !--tp->setlpicnt)
9245 tg3_phy_eee_enable(tp);
9247 if (tg3_flag(tp, USE_LINKCHG_REG)) {
9251 mac_stat = tr32(MAC_STATUS);
9254 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
9255 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
9257 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
9261 tg3_setup_phy(tp, 0);
9262 } else if (tg3_flag(tp, POLL_SERDES)) {
9263 u32 mac_stat = tr32(MAC_STATUS);
9266 if (netif_carrier_ok(tp->dev) &&
9267 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
9270 if (!netif_carrier_ok(tp->dev) &&
9271 (mac_stat & (MAC_STATUS_PCS_SYNCED |
9272 MAC_STATUS_SIGNAL_DET))) {
9276 if (!tp->serdes_counter) {
9279 ~MAC_MODE_PORT_MODE_MASK));
9281 tw32_f(MAC_MODE, tp->mac_mode);
9284 tg3_setup_phy(tp, 0);
9286 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9287 tg3_flag(tp, 5780_CLASS)) {
9288 tg3_serdes_parallel_detect(tp);
9291 tp->timer_counter = tp->timer_multiplier;
9294 /* Heartbeat is only sent once every 2 seconds.
9296 * The heartbeat is to tell the ASF firmware that the host
9297 * driver is still alive. In the event that the OS crashes,
9298 * ASF needs to reset the hardware to free up the FIFO space
9299 * that may be filled with rx packets destined for the host.
9300 * If the FIFO is full, ASF will no longer function properly.
9302 * Unintended resets have been reported on real time kernels
9303 * where the timer doesn't run on time. Netpoll will also have
9306 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
9307 * to check the ring condition when the heartbeat is expiring
9308 * before doing the reset. This will prevent most unintended
9311 if (!--tp->asf_counter) {
9312 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
9313 tg3_wait_for_event_ack(tp);
9315 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
9316 FWCMD_NICDRV_ALIVE3);
9317 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
9318 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
9319 TG3_FW_UPDATE_TIMEOUT_SEC);
9321 tg3_generate_fw_event(tp);
9323 tp->asf_counter = tp->asf_multiplier;
9326 spin_unlock(&tp->lock);
9329 tp->timer.expires = jiffies + tp->timer_offset;
9330 add_timer(&tp->timer);
9333 static int tg3_request_irq(struct tg3 *tp, int irq_num)
9336 unsigned long flags;
9338 struct tg3_napi *tnapi = &tp->napi[irq_num];
9340 if (tp->irq_cnt == 1)
9341 name = tp->dev->name;
9343 name = &tnapi->irq_lbl[0];
9344 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
9345 name[IFNAMSIZ-1] = 0;
9348 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9350 if (tg3_flag(tp, 1SHOT_MSI))
9355 if (tg3_flag(tp, TAGGED_STATUS))
9356 fn = tg3_interrupt_tagged;
9357 flags = IRQF_SHARED;
9360 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
9363 static int tg3_test_interrupt(struct tg3 *tp)
9365 struct tg3_napi *tnapi = &tp->napi[0];
9366 struct net_device *dev = tp->dev;
9367 int err, i, intr_ok = 0;
9370 if (!netif_running(dev))
9373 tg3_disable_ints(tp);
9375 free_irq(tnapi->irq_vec, tnapi);
9378 * Turn off MSI one shot mode. Otherwise this test has no
9379 * observable way to know whether the interrupt was delivered.
9381 if (tg3_flag(tp, 57765_PLUS)) {
9382 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
9383 tw32(MSGINT_MODE, val);
9386 err = request_irq(tnapi->irq_vec, tg3_test_isr,
9387 IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, tnapi);
9391 tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
9392 tg3_enable_ints(tp);
9394 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
9397 for (i = 0; i < 5; i++) {
9398 u32 int_mbox, misc_host_ctrl;
9400 int_mbox = tr32_mailbox(tnapi->int_mbox);
9401 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
9403 if ((int_mbox != 0) ||
9404 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
9409 if (tg3_flag(tp, 57765_PLUS) &&
9410 tnapi->hw_status->status_tag != tnapi->last_tag)
9411 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
9416 tg3_disable_ints(tp);
9418 free_irq(tnapi->irq_vec, tnapi);
9420 err = tg3_request_irq(tp, 0);
9426 /* Reenable MSI one shot mode. */
9427 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
9428 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
9429 tw32(MSGINT_MODE, val);
9437 /* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
9438 * successfully restored
9440 static int tg3_test_msi(struct tg3 *tp)
9445 if (!tg3_flag(tp, USING_MSI))
9448 /* Turn off SERR reporting in case MSI terminates with Master
9451 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9452 pci_write_config_word(tp->pdev, PCI_COMMAND,
9453 pci_cmd & ~PCI_COMMAND_SERR);
9455 err = tg3_test_interrupt(tp);
9457 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9462 /* other failures */
9466 /* MSI test failed, go back to INTx mode */
9467 netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
9468 "to INTx mode. Please report this failure to the PCI "
9469 "maintainer and include system chipset information\n");
9471 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9473 pci_disable_msi(tp->pdev);
9475 tg3_flag_clear(tp, USING_MSI);
9476 tp->napi[0].irq_vec = tp->pdev->irq;
9478 err = tg3_request_irq(tp, 0);
9482 /* Need to reset the chip because the MSI cycle may have terminated
9483 * with Master Abort.
9485 tg3_full_lock(tp, 1);
9487 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9488 err = tg3_init_hw(tp, 1);
9490 tg3_full_unlock(tp);
9493 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9498 static int tg3_request_firmware(struct tg3 *tp)
9500 const __be32 *fw_data;
9502 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
9503 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
9508 fw_data = (void *)tp->fw->data;
9510 /* Firmware blob starts with version numbers, followed by
9511 * start address and _full_ length including BSS sections
9512 * (which must be longer than the actual data, of course
9515 tp->fw_len = be32_to_cpu(fw_data[2]); /* includes bss */
9516 if (tp->fw_len < (tp->fw->size - 12)) {
9517 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
9518 tp->fw_len, tp->fw_needed);
9519 release_firmware(tp->fw);
9524 /* We no longer need firmware; we have it. */
9525 tp->fw_needed = NULL;
9529 static bool tg3_enable_msix(struct tg3 *tp)
9531 int i, rc, cpus = num_online_cpus();
9532 struct msix_entry msix_ent[tp->irq_max];
9535 /* Just fallback to the simpler MSI mode. */
9539 * We want as many rx rings enabled as there are cpus.
9540 * The first MSIX vector only deals with link interrupts, etc,
9541 * so we add one to the number of vectors we are requesting.
9543 tp->irq_cnt = min_t(unsigned, cpus + 1, tp->irq_max);
9545 for (i = 0; i < tp->irq_max; i++) {
9546 msix_ent[i].entry = i;
9547 msix_ent[i].vector = 0;
9550 rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
9553 } else if (rc != 0) {
9554 if (pci_enable_msix(tp->pdev, msix_ent, rc))
9556 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
9561 for (i = 0; i < tp->irq_max; i++)
9562 tp->napi[i].irq_vec = msix_ent[i].vector;
9564 netif_set_real_num_tx_queues(tp->dev, 1);
9565 rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1;
9566 if (netif_set_real_num_rx_queues(tp->dev, rc)) {
9567 pci_disable_msix(tp->pdev);
9571 if (tp->irq_cnt > 1) {
9572 tg3_flag_set(tp, ENABLE_RSS);
9574 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
9575 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9576 tg3_flag_set(tp, ENABLE_TSS);
9577 netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
9584 static void tg3_ints_init(struct tg3 *tp)
9586 if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
9587 !tg3_flag(tp, TAGGED_STATUS)) {
9588 /* All MSI supporting chips should support tagged
9589 * status. Assert that this is the case.
9591 netdev_warn(tp->dev,
9592 "MSI without TAGGED_STATUS? Not using MSI\n");
9596 if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
9597 tg3_flag_set(tp, USING_MSIX);
9598 else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
9599 tg3_flag_set(tp, USING_MSI);
9601 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9602 u32 msi_mode = tr32(MSGINT_MODE);
9603 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
9604 msi_mode |= MSGINT_MODE_MULTIVEC_EN;
9605 if (!tg3_flag(tp, 1SHOT_MSI))
9606 msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
9607 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
9610 if (!tg3_flag(tp, USING_MSIX)) {
9612 tp->napi[0].irq_vec = tp->pdev->irq;
9613 netif_set_real_num_tx_queues(tp->dev, 1);
9614 netif_set_real_num_rx_queues(tp->dev, 1);
9618 static void tg3_ints_fini(struct tg3 *tp)
9620 if (tg3_flag(tp, USING_MSIX))
9621 pci_disable_msix(tp->pdev);
9622 else if (tg3_flag(tp, USING_MSI))
9623 pci_disable_msi(tp->pdev);
9624 tg3_flag_clear(tp, USING_MSI);
9625 tg3_flag_clear(tp, USING_MSIX);
9626 tg3_flag_clear(tp, ENABLE_RSS);
9627 tg3_flag_clear(tp, ENABLE_TSS);
9630 static int tg3_open(struct net_device *dev)
9632 struct tg3 *tp = netdev_priv(dev);
9635 if (tp->fw_needed) {
9636 err = tg3_request_firmware(tp);
9637 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9641 netdev_warn(tp->dev, "TSO capability disabled\n");
9642 tg3_flag_clear(tp, TSO_CAPABLE);
9643 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
9644 netdev_notice(tp->dev, "TSO capability restored\n");
9645 tg3_flag_set(tp, TSO_CAPABLE);
9649 netif_carrier_off(tp->dev);
9651 err = tg3_power_up(tp);
9655 tg3_full_lock(tp, 0);
9657 tg3_disable_ints(tp);
9658 tg3_flag_clear(tp, INIT_COMPLETE);
9660 tg3_full_unlock(tp);
9663 * Setup interrupts first so we know how
9664 * many NAPI resources to allocate
9668 /* The placement of this call is tied
9669 * to the setup and use of Host TX descriptors.
9671 err = tg3_alloc_consistent(tp);
9677 tg3_napi_enable(tp);
9679 for (i = 0; i < tp->irq_cnt; i++) {
9680 struct tg3_napi *tnapi = &tp->napi[i];
9681 err = tg3_request_irq(tp, i);
9683 for (i--; i >= 0; i--)
9684 free_irq(tnapi->irq_vec, tnapi);
9692 tg3_full_lock(tp, 0);
9694 err = tg3_init_hw(tp, 1);
9696 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9699 if (tg3_flag(tp, TAGGED_STATUS) &&
9700 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9701 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765)
9702 tp->timer_offset = HZ;
9704 tp->timer_offset = HZ / 10;
9706 BUG_ON(tp->timer_offset > HZ);
9707 tp->timer_counter = tp->timer_multiplier =
9708 (HZ / tp->timer_offset);
9709 tp->asf_counter = tp->asf_multiplier =
9710 ((HZ / tp->timer_offset) * 2);
9712 init_timer(&tp->timer);
9713 tp->timer.expires = jiffies + tp->timer_offset;
9714 tp->timer.data = (unsigned long) tp;
9715 tp->timer.function = tg3_timer;
9718 tg3_full_unlock(tp);
9723 if (tg3_flag(tp, USING_MSI)) {
9724 err = tg3_test_msi(tp);
9727 tg3_full_lock(tp, 0);
9728 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9730 tg3_full_unlock(tp);
9735 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
9736 u32 val = tr32(PCIE_TRANSACTION_CFG);
9738 tw32(PCIE_TRANSACTION_CFG,
9739 val | PCIE_TRANS_CFG_1SHOT_MSI);
9745 tg3_full_lock(tp, 0);
9747 add_timer(&tp->timer);
9748 tg3_flag_set(tp, INIT_COMPLETE);
9749 tg3_enable_ints(tp);
9751 tg3_full_unlock(tp);
9753 netif_tx_start_all_queues(dev);
9756 * Reset loopback feature if it was turned on while the device was down
9757 * make sure that it's installed properly now.
9759 if (dev->features & NETIF_F_LOOPBACK)
9760 tg3_set_loopback(dev, dev->features);
9765 for (i = tp->irq_cnt - 1; i >= 0; i--) {
9766 struct tg3_napi *tnapi = &tp->napi[i];
9767 free_irq(tnapi->irq_vec, tnapi);
9771 tg3_napi_disable(tp);
9773 tg3_free_consistent(tp);
9777 tg3_frob_aux_power(tp, false);
9778 pci_set_power_state(tp->pdev, PCI_D3hot);
9782 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *,
9783 struct rtnl_link_stats64 *);
9784 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
9786 static int tg3_close(struct net_device *dev)
9789 struct tg3 *tp = netdev_priv(dev);
9791 tg3_napi_disable(tp);
9792 cancel_work_sync(&tp->reset_task);
9794 netif_tx_stop_all_queues(dev);
9796 del_timer_sync(&tp->timer);
9800 tg3_full_lock(tp, 1);
9802 tg3_disable_ints(tp);
9804 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9806 tg3_flag_clear(tp, INIT_COMPLETE);
9808 tg3_full_unlock(tp);
9810 for (i = tp->irq_cnt - 1; i >= 0; i--) {
9811 struct tg3_napi *tnapi = &tp->napi[i];
9812 free_irq(tnapi->irq_vec, tnapi);
9817 tg3_get_stats64(tp->dev, &tp->net_stats_prev);
9819 memcpy(&tp->estats_prev, tg3_get_estats(tp),
9820 sizeof(tp->estats_prev));
9824 tg3_free_consistent(tp);
9828 netif_carrier_off(tp->dev);
9833 static inline u64 get_stat64(tg3_stat64_t *val)
9835 return ((u64)val->high << 32) | ((u64)val->low);
9838 static u64 calc_crc_errors(struct tg3 *tp)
9840 struct tg3_hw_stats *hw_stats = tp->hw_stats;
9842 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9843 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9844 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
9847 spin_lock_bh(&tp->lock);
9848 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
9849 tg3_writephy(tp, MII_TG3_TEST1,
9850 val | MII_TG3_TEST1_CRC_EN);
9851 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
9854 spin_unlock_bh(&tp->lock);
9856 tp->phy_crc_errors += val;
9858 return tp->phy_crc_errors;
9861 return get_stat64(&hw_stats->rx_fcs_errors);
9864 #define ESTAT_ADD(member) \
9865 estats->member = old_estats->member + \
9866 get_stat64(&hw_stats->member)
9868 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
9870 struct tg3_ethtool_stats *estats = &tp->estats;
9871 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
9872 struct tg3_hw_stats *hw_stats = tp->hw_stats;
9877 ESTAT_ADD(rx_octets);
9878 ESTAT_ADD(rx_fragments);
9879 ESTAT_ADD(rx_ucast_packets);
9880 ESTAT_ADD(rx_mcast_packets);
9881 ESTAT_ADD(rx_bcast_packets);
9882 ESTAT_ADD(rx_fcs_errors);
9883 ESTAT_ADD(rx_align_errors);
9884 ESTAT_ADD(rx_xon_pause_rcvd);
9885 ESTAT_ADD(rx_xoff_pause_rcvd);
9886 ESTAT_ADD(rx_mac_ctrl_rcvd);
9887 ESTAT_ADD(rx_xoff_entered);
9888 ESTAT_ADD(rx_frame_too_long_errors);
9889 ESTAT_ADD(rx_jabbers);
9890 ESTAT_ADD(rx_undersize_packets);
9891 ESTAT_ADD(rx_in_length_errors);
9892 ESTAT_ADD(rx_out_length_errors);
9893 ESTAT_ADD(rx_64_or_less_octet_packets);
9894 ESTAT_ADD(rx_65_to_127_octet_packets);
9895 ESTAT_ADD(rx_128_to_255_octet_packets);
9896 ESTAT_ADD(rx_256_to_511_octet_packets);
9897 ESTAT_ADD(rx_512_to_1023_octet_packets);
9898 ESTAT_ADD(rx_1024_to_1522_octet_packets);
9899 ESTAT_ADD(rx_1523_to_2047_octet_packets);
9900 ESTAT_ADD(rx_2048_to_4095_octet_packets);
9901 ESTAT_ADD(rx_4096_to_8191_octet_packets);
9902 ESTAT_ADD(rx_8192_to_9022_octet_packets);
9904 ESTAT_ADD(tx_octets);
9905 ESTAT_ADD(tx_collisions);
9906 ESTAT_ADD(tx_xon_sent);
9907 ESTAT_ADD(tx_xoff_sent);
9908 ESTAT_ADD(tx_flow_control);
9909 ESTAT_ADD(tx_mac_errors);
9910 ESTAT_ADD(tx_single_collisions);
9911 ESTAT_ADD(tx_mult_collisions);
9912 ESTAT_ADD(tx_deferred);
9913 ESTAT_ADD(tx_excessive_collisions);
9914 ESTAT_ADD(tx_late_collisions);
9915 ESTAT_ADD(tx_collide_2times);
9916 ESTAT_ADD(tx_collide_3times);
9917 ESTAT_ADD(tx_collide_4times);
9918 ESTAT_ADD(tx_collide_5times);
9919 ESTAT_ADD(tx_collide_6times);
9920 ESTAT_ADD(tx_collide_7times);
9921 ESTAT_ADD(tx_collide_8times);
9922 ESTAT_ADD(tx_collide_9times);
9923 ESTAT_ADD(tx_collide_10times);
9924 ESTAT_ADD(tx_collide_11times);
9925 ESTAT_ADD(tx_collide_12times);
9926 ESTAT_ADD(tx_collide_13times);
9927 ESTAT_ADD(tx_collide_14times);
9928 ESTAT_ADD(tx_collide_15times);
9929 ESTAT_ADD(tx_ucast_packets);
9930 ESTAT_ADD(tx_mcast_packets);
9931 ESTAT_ADD(tx_bcast_packets);
9932 ESTAT_ADD(tx_carrier_sense_errors);
9933 ESTAT_ADD(tx_discards);
9934 ESTAT_ADD(tx_errors);
9936 ESTAT_ADD(dma_writeq_full);
9937 ESTAT_ADD(dma_write_prioq_full);
9938 ESTAT_ADD(rxbds_empty);
9939 ESTAT_ADD(rx_discards);
9940 ESTAT_ADD(rx_errors);
9941 ESTAT_ADD(rx_threshold_hit);
9943 ESTAT_ADD(dma_readq_full);
9944 ESTAT_ADD(dma_read_prioq_full);
9945 ESTAT_ADD(tx_comp_queue_full);
9947 ESTAT_ADD(ring_set_send_prod_index);
9948 ESTAT_ADD(ring_status_update);
9949 ESTAT_ADD(nic_irqs);
9950 ESTAT_ADD(nic_avoided_irqs);
9951 ESTAT_ADD(nic_tx_threshold_hit);
9953 ESTAT_ADD(mbuf_lwm_thresh_hit);
9958 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
9959 struct rtnl_link_stats64 *stats)
9961 struct tg3 *tp = netdev_priv(dev);
9962 struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
9963 struct tg3_hw_stats *hw_stats = tp->hw_stats;
9968 stats->rx_packets = old_stats->rx_packets +
9969 get_stat64(&hw_stats->rx_ucast_packets) +
9970 get_stat64(&hw_stats->rx_mcast_packets) +
9971 get_stat64(&hw_stats->rx_bcast_packets);
9973 stats->tx_packets = old_stats->tx_packets +
9974 get_stat64(&hw_stats->tx_ucast_packets) +
9975 get_stat64(&hw_stats->tx_mcast_packets) +
9976 get_stat64(&hw_stats->tx_bcast_packets);
9978 stats->rx_bytes = old_stats->rx_bytes +
9979 get_stat64(&hw_stats->rx_octets);
9980 stats->tx_bytes = old_stats->tx_bytes +
9981 get_stat64(&hw_stats->tx_octets);
9983 stats->rx_errors = old_stats->rx_errors +
9984 get_stat64(&hw_stats->rx_errors);
9985 stats->tx_errors = old_stats->tx_errors +
9986 get_stat64(&hw_stats->tx_errors) +
9987 get_stat64(&hw_stats->tx_mac_errors) +
9988 get_stat64(&hw_stats->tx_carrier_sense_errors) +
9989 get_stat64(&hw_stats->tx_discards);
9991 stats->multicast = old_stats->multicast +
9992 get_stat64(&hw_stats->rx_mcast_packets);
9993 stats->collisions = old_stats->collisions +
9994 get_stat64(&hw_stats->tx_collisions);
9996 stats->rx_length_errors = old_stats->rx_length_errors +
9997 get_stat64(&hw_stats->rx_frame_too_long_errors) +
9998 get_stat64(&hw_stats->rx_undersize_packets);
10000 stats->rx_over_errors = old_stats->rx_over_errors +
10001 get_stat64(&hw_stats->rxbds_empty);
10002 stats->rx_frame_errors = old_stats->rx_frame_errors +
10003 get_stat64(&hw_stats->rx_align_errors);
10004 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
10005 get_stat64(&hw_stats->tx_discards);
10006 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
10007 get_stat64(&hw_stats->tx_carrier_sense_errors);
10009 stats->rx_crc_errors = old_stats->rx_crc_errors +
10010 calc_crc_errors(tp);
10012 stats->rx_missed_errors = old_stats->rx_missed_errors +
10013 get_stat64(&hw_stats->rx_discards);
10015 stats->rx_dropped = tp->rx_dropped;
10020 static inline u32 calc_crc(unsigned char *buf, int len)
10028 for (j = 0; j < len; j++) {
10031 for (k = 0; k < 8; k++) {
10044 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
10046 /* accept or reject all multicast frames */
10047 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
10048 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
10049 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
10050 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
10053 static void __tg3_set_rx_mode(struct net_device *dev)
10055 struct tg3 *tp = netdev_priv(dev);
10058 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
10059 RX_MODE_KEEP_VLAN_TAG);
10061 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
10062 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
10065 if (!tg3_flag(tp, ENABLE_ASF))
10066 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
10069 if (dev->flags & IFF_PROMISC) {
10070 /* Promiscuous mode. */
10071 rx_mode |= RX_MODE_PROMISC;
10072 } else if (dev->flags & IFF_ALLMULTI) {
10073 /* Accept all multicast. */
10074 tg3_set_multi(tp, 1);
10075 } else if (netdev_mc_empty(dev)) {
10076 /* Reject all multicast. */
10077 tg3_set_multi(tp, 0);
10079 /* Accept one or more multicast(s). */
10080 struct netdev_hw_addr *ha;
10081 u32 mc_filter[4] = { 0, };
10086 netdev_for_each_mc_addr(ha, dev) {
10087 crc = calc_crc(ha->addr, ETH_ALEN);
10089 regidx = (bit & 0x60) >> 5;
10091 mc_filter[regidx] |= (1 << bit);
10094 tw32(MAC_HASH_REG_0, mc_filter[0]);
10095 tw32(MAC_HASH_REG_1, mc_filter[1]);
10096 tw32(MAC_HASH_REG_2, mc_filter[2]);
10097 tw32(MAC_HASH_REG_3, mc_filter[3]);
10100 if (rx_mode != tp->rx_mode) {
10101 tp->rx_mode = rx_mode;
10102 tw32_f(MAC_RX_MODE, rx_mode);
10107 static void tg3_set_rx_mode(struct net_device *dev)
10109 struct tg3 *tp = netdev_priv(dev);
10111 if (!netif_running(dev))
10114 tg3_full_lock(tp, 0);
10115 __tg3_set_rx_mode(dev);
10116 tg3_full_unlock(tp);
10119 static int tg3_get_regs_len(struct net_device *dev)
10121 return TG3_REG_BLK_SIZE;
10124 static void tg3_get_regs(struct net_device *dev,
10125 struct ethtool_regs *regs, void *_p)
10127 struct tg3 *tp = netdev_priv(dev);
10131 memset(_p, 0, TG3_REG_BLK_SIZE);
10133 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10136 tg3_full_lock(tp, 0);
10138 tg3_dump_legacy_regs(tp, (u32 *)_p);
10140 tg3_full_unlock(tp);
10143 static int tg3_get_eeprom_len(struct net_device *dev)
10145 struct tg3 *tp = netdev_priv(dev);
10147 return tp->nvram_size;
10150 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10152 struct tg3 *tp = netdev_priv(dev);
10155 u32 i, offset, len, b_offset, b_count;
10158 if (tg3_flag(tp, NO_NVRAM))
10161 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10164 offset = eeprom->offset;
10168 eeprom->magic = TG3_EEPROM_MAGIC;
10171 /* adjustments to start on required 4 byte boundary */
10172 b_offset = offset & 3;
10173 b_count = 4 - b_offset;
10174 if (b_count > len) {
10175 /* i.e. offset=1 len=2 */
10178 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
10181 memcpy(data, ((char *)&val) + b_offset, b_count);
10184 eeprom->len += b_count;
10187 /* read bytes up to the last 4 byte boundary */
10188 pd = &data[eeprom->len];
10189 for (i = 0; i < (len - (len & 3)); i += 4) {
10190 ret = tg3_nvram_read_be32(tp, offset + i, &val);
10195 memcpy(pd + i, &val, 4);
10200 /* read last bytes not ending on 4 byte boundary */
10201 pd = &data[eeprom->len];
10203 b_offset = offset + len - b_count;
10204 ret = tg3_nvram_read_be32(tp, b_offset, &val);
10207 memcpy(pd, &val, b_count);
10208 eeprom->len += b_count;
10213 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
10215 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10217 struct tg3 *tp = netdev_priv(dev);
10219 u32 offset, len, b_offset, odd_len;
10223 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10226 if (tg3_flag(tp, NO_NVRAM) ||
10227 eeprom->magic != TG3_EEPROM_MAGIC)
10230 offset = eeprom->offset;
10233 if ((b_offset = (offset & 3))) {
10234 /* adjustments to start on required 4 byte boundary */
10235 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
10246 /* adjustments to end on required 4 byte boundary */
10248 len = (len + 3) & ~3;
10249 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
10255 if (b_offset || odd_len) {
10256 buf = kmalloc(len, GFP_KERNEL);
10260 memcpy(buf, &start, 4);
10262 memcpy(buf+len-4, &end, 4);
10263 memcpy(buf + b_offset, data, eeprom->len);
10266 ret = tg3_nvram_write_block(tp, offset, len, buf);
10274 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10276 struct tg3 *tp = netdev_priv(dev);
10278 if (tg3_flag(tp, USE_PHYLIB)) {
10279 struct phy_device *phydev;
10280 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10282 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10283 return phy_ethtool_gset(phydev, cmd);
10286 cmd->supported = (SUPPORTED_Autoneg);
10288 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10289 cmd->supported |= (SUPPORTED_1000baseT_Half |
10290 SUPPORTED_1000baseT_Full);
10292 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
10293 cmd->supported |= (SUPPORTED_100baseT_Half |
10294 SUPPORTED_100baseT_Full |
10295 SUPPORTED_10baseT_Half |
10296 SUPPORTED_10baseT_Full |
10298 cmd->port = PORT_TP;
10300 cmd->supported |= SUPPORTED_FIBRE;
10301 cmd->port = PORT_FIBRE;
10304 cmd->advertising = tp->link_config.advertising;
10305 if (tg3_flag(tp, PAUSE_AUTONEG)) {
10306 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
10307 if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10308 cmd->advertising |= ADVERTISED_Pause;
10310 cmd->advertising |= ADVERTISED_Pause |
10311 ADVERTISED_Asym_Pause;
10313 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10314 cmd->advertising |= ADVERTISED_Asym_Pause;
10317 if (netif_running(dev)) {
10318 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
10319 cmd->duplex = tp->link_config.active_duplex;
10321 ethtool_cmd_speed_set(cmd, SPEED_INVALID);
10322 cmd->duplex = DUPLEX_INVALID;
10324 cmd->phy_address = tp->phy_addr;
10325 cmd->transceiver = XCVR_INTERNAL;
10326 cmd->autoneg = tp->link_config.autoneg;
10332 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10334 struct tg3 *tp = netdev_priv(dev);
10335 u32 speed = ethtool_cmd_speed(cmd);
10337 if (tg3_flag(tp, USE_PHYLIB)) {
10338 struct phy_device *phydev;
10339 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10341 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10342 return phy_ethtool_sset(phydev, cmd);
10345 if (cmd->autoneg != AUTONEG_ENABLE &&
10346 cmd->autoneg != AUTONEG_DISABLE)
10349 if (cmd->autoneg == AUTONEG_DISABLE &&
10350 cmd->duplex != DUPLEX_FULL &&
10351 cmd->duplex != DUPLEX_HALF)
10354 if (cmd->autoneg == AUTONEG_ENABLE) {
10355 u32 mask = ADVERTISED_Autoneg |
10357 ADVERTISED_Asym_Pause;
10359 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10360 mask |= ADVERTISED_1000baseT_Half |
10361 ADVERTISED_1000baseT_Full;
10363 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
10364 mask |= ADVERTISED_100baseT_Half |
10365 ADVERTISED_100baseT_Full |
10366 ADVERTISED_10baseT_Half |
10367 ADVERTISED_10baseT_Full |
10370 mask |= ADVERTISED_FIBRE;
10372 if (cmd->advertising & ~mask)
10375 mask &= (ADVERTISED_1000baseT_Half |
10376 ADVERTISED_1000baseT_Full |
10377 ADVERTISED_100baseT_Half |
10378 ADVERTISED_100baseT_Full |
10379 ADVERTISED_10baseT_Half |
10380 ADVERTISED_10baseT_Full);
10382 cmd->advertising &= mask;
10384 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
10385 if (speed != SPEED_1000)
10388 if (cmd->duplex != DUPLEX_FULL)
10391 if (speed != SPEED_100 &&
10397 tg3_full_lock(tp, 0);
10399 tp->link_config.autoneg = cmd->autoneg;
10400 if (cmd->autoneg == AUTONEG_ENABLE) {
10401 tp->link_config.advertising = (cmd->advertising |
10402 ADVERTISED_Autoneg);
10403 tp->link_config.speed = SPEED_INVALID;
10404 tp->link_config.duplex = DUPLEX_INVALID;
10406 tp->link_config.advertising = 0;
10407 tp->link_config.speed = speed;
10408 tp->link_config.duplex = cmd->duplex;
10411 tp->link_config.orig_speed = tp->link_config.speed;
10412 tp->link_config.orig_duplex = tp->link_config.duplex;
10413 tp->link_config.orig_autoneg = tp->link_config.autoneg;
10415 if (netif_running(dev))
10416 tg3_setup_phy(tp, 1);
10418 tg3_full_unlock(tp);
10423 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
10425 struct tg3 *tp = netdev_priv(dev);
10427 strcpy(info->driver, DRV_MODULE_NAME);
10428 strcpy(info->version, DRV_MODULE_VERSION);
10429 strcpy(info->fw_version, tp->fw_ver);
10430 strcpy(info->bus_info, pci_name(tp->pdev));
10433 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10435 struct tg3 *tp = netdev_priv(dev);
10437 if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
10438 wol->supported = WAKE_MAGIC;
10440 wol->supported = 0;
10442 if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
10443 wol->wolopts = WAKE_MAGIC;
10444 memset(&wol->sopass, 0, sizeof(wol->sopass));
10447 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10449 struct tg3 *tp = netdev_priv(dev);
10450 struct device *dp = &tp->pdev->dev;
10452 if (wol->wolopts & ~WAKE_MAGIC)
10454 if ((wol->wolopts & WAKE_MAGIC) &&
10455 !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
10458 device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
10460 spin_lock_bh(&tp->lock);
10461 if (device_may_wakeup(dp))
10462 tg3_flag_set(tp, WOL_ENABLE);
10464 tg3_flag_clear(tp, WOL_ENABLE);
10465 spin_unlock_bh(&tp->lock);
10470 static u32 tg3_get_msglevel(struct net_device *dev)
10472 struct tg3 *tp = netdev_priv(dev);
10473 return tp->msg_enable;
10476 static void tg3_set_msglevel(struct net_device *dev, u32 value)
10478 struct tg3 *tp = netdev_priv(dev);
10479 tp->msg_enable = value;
10482 static int tg3_nway_reset(struct net_device *dev)
10484 struct tg3 *tp = netdev_priv(dev);
10487 if (!netif_running(dev))
10490 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
10493 if (tg3_flag(tp, USE_PHYLIB)) {
10494 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10496 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
10500 spin_lock_bh(&tp->lock);
10502 tg3_readphy(tp, MII_BMCR, &bmcr);
10503 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
10504 ((bmcr & BMCR_ANENABLE) ||
10505 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
10506 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
10510 spin_unlock_bh(&tp->lock);
10516 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10518 struct tg3 *tp = netdev_priv(dev);
10520 ering->rx_max_pending = tp->rx_std_ring_mask;
10521 ering->rx_mini_max_pending = 0;
10522 if (tg3_flag(tp, JUMBO_RING_ENABLE))
10523 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
10525 ering->rx_jumbo_max_pending = 0;
10527 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
10529 ering->rx_pending = tp->rx_pending;
10530 ering->rx_mini_pending = 0;
10531 if (tg3_flag(tp, JUMBO_RING_ENABLE))
10532 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
10534 ering->rx_jumbo_pending = 0;
10536 ering->tx_pending = tp->napi[0].tx_pending;
10539 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10541 struct tg3 *tp = netdev_priv(dev);
10542 int i, irq_sync = 0, err = 0;
10544 if ((ering->rx_pending > tp->rx_std_ring_mask) ||
10545 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
10546 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
10547 (ering->tx_pending <= MAX_SKB_FRAGS) ||
10548 (tg3_flag(tp, TSO_BUG) &&
10549 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
10552 if (netif_running(dev)) {
10554 tg3_netif_stop(tp);
10558 tg3_full_lock(tp, irq_sync);
10560 tp->rx_pending = ering->rx_pending;
10562 if (tg3_flag(tp, MAX_RXPEND_64) &&
10563 tp->rx_pending > 63)
10564 tp->rx_pending = 63;
10565 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
10567 for (i = 0; i < tp->irq_max; i++)
10568 tp->napi[i].tx_pending = ering->tx_pending;
10570 if (netif_running(dev)) {
10571 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10572 err = tg3_restart_hw(tp, 1);
10574 tg3_netif_start(tp);
10577 tg3_full_unlock(tp);
10579 if (irq_sync && !err)
10585 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10587 struct tg3 *tp = netdev_priv(dev);
10589 epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
10591 if (tp->link_config.active_flowctrl & FLOW_CTRL_RX)
10592 epause->rx_pause = 1;
10594 epause->rx_pause = 0;
10596 if (tp->link_config.active_flowctrl & FLOW_CTRL_TX)
10597 epause->tx_pause = 1;
10599 epause->tx_pause = 0;
10602 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10604 struct tg3 *tp = netdev_priv(dev);
10607 if (tg3_flag(tp, USE_PHYLIB)) {
10609 struct phy_device *phydev;
10611 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10613 if (!(phydev->supported & SUPPORTED_Pause) ||
10614 (!(phydev->supported & SUPPORTED_Asym_Pause) &&
10615 (epause->rx_pause != epause->tx_pause)))
10618 tp->link_config.flowctrl = 0;
10619 if (epause->rx_pause) {
10620 tp->link_config.flowctrl |= FLOW_CTRL_RX;
10622 if (epause->tx_pause) {
10623 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10624 newadv = ADVERTISED_Pause;
10626 newadv = ADVERTISED_Pause |
10627 ADVERTISED_Asym_Pause;
10628 } else if (epause->tx_pause) {
10629 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10630 newadv = ADVERTISED_Asym_Pause;
10634 if (epause->autoneg)
10635 tg3_flag_set(tp, PAUSE_AUTONEG);
10637 tg3_flag_clear(tp, PAUSE_AUTONEG);
10639 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
10640 u32 oldadv = phydev->advertising &
10641 (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
10642 if (oldadv != newadv) {
10643 phydev->advertising &=
10644 ~(ADVERTISED_Pause |
10645 ADVERTISED_Asym_Pause);
10646 phydev->advertising |= newadv;
10647 if (phydev->autoneg) {
10649 * Always renegotiate the link to
10650 * inform our link partner of our
10651 * flow control settings, even if the
10652 * flow control is forced. Let
10653 * tg3_adjust_link() do the final
10654 * flow control setup.
10656 return phy_start_aneg(phydev);
10660 if (!epause->autoneg)
10661 tg3_setup_flow_control(tp, 0, 0);
10663 tp->link_config.orig_advertising &=
10664 ~(ADVERTISED_Pause |
10665 ADVERTISED_Asym_Pause);
10666 tp->link_config.orig_advertising |= newadv;
10671 if (netif_running(dev)) {
10672 tg3_netif_stop(tp);
10676 tg3_full_lock(tp, irq_sync);
10678 if (epause->autoneg)
10679 tg3_flag_set(tp, PAUSE_AUTONEG);
10681 tg3_flag_clear(tp, PAUSE_AUTONEG);
10682 if (epause->rx_pause)
10683 tp->link_config.flowctrl |= FLOW_CTRL_RX;
10685 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
10686 if (epause->tx_pause)
10687 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10689 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
10691 if (netif_running(dev)) {
10692 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10693 err = tg3_restart_hw(tp, 1);
10695 tg3_netif_start(tp);
10698 tg3_full_unlock(tp);
10704 static int tg3_get_sset_count(struct net_device *dev, int sset)
10708 return TG3_NUM_TEST;
10710 return TG3_NUM_STATS;
10712 return -EOPNOTSUPP;
10716 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10718 switch (stringset) {
10720 memcpy(buf, ðtool_stats_keys, sizeof(ethtool_stats_keys));
10723 memcpy(buf, ðtool_test_keys, sizeof(ethtool_test_keys));
10726 WARN_ON(1); /* we need a WARN() */
10731 static int tg3_set_phys_id(struct net_device *dev,
10732 enum ethtool_phys_id_state state)
10734 struct tg3 *tp = netdev_priv(dev);
10736 if (!netif_running(tp->dev))
10740 case ETHTOOL_ID_ACTIVE:
10741 return 1; /* cycle on/off once per second */
10743 case ETHTOOL_ID_ON:
10744 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10745 LED_CTRL_1000MBPS_ON |
10746 LED_CTRL_100MBPS_ON |
10747 LED_CTRL_10MBPS_ON |
10748 LED_CTRL_TRAFFIC_OVERRIDE |
10749 LED_CTRL_TRAFFIC_BLINK |
10750 LED_CTRL_TRAFFIC_LED);
10753 case ETHTOOL_ID_OFF:
10754 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10755 LED_CTRL_TRAFFIC_OVERRIDE);
10758 case ETHTOOL_ID_INACTIVE:
10759 tw32(MAC_LED_CTRL, tp->led_ctrl);
10766 static void tg3_get_ethtool_stats(struct net_device *dev,
10767 struct ethtool_stats *estats, u64 *tmp_stats)
10769 struct tg3 *tp = netdev_priv(dev);
10770 memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
10773 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
10777 u32 offset = 0, len = 0;
10780 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
10783 if (magic == TG3_EEPROM_MAGIC) {
10784 for (offset = TG3_NVM_DIR_START;
10785 offset < TG3_NVM_DIR_END;
10786 offset += TG3_NVM_DIRENT_SIZE) {
10787 if (tg3_nvram_read(tp, offset, &val))
10790 if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
10791 TG3_NVM_DIRTYPE_EXTVPD)
10795 if (offset != TG3_NVM_DIR_END) {
10796 len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
10797 if (tg3_nvram_read(tp, offset + 4, &offset))
10800 offset = tg3_nvram_logical_addr(tp, offset);
10804 if (!offset || !len) {
10805 offset = TG3_NVM_VPD_OFF;
10806 len = TG3_NVM_VPD_LEN;
10809 buf = kmalloc(len, GFP_KERNEL);
10813 if (magic == TG3_EEPROM_MAGIC) {
10814 for (i = 0; i < len; i += 4) {
10815 /* The data is in little-endian format in NVRAM.
10816 * Use the big-endian read routines to preserve
10817 * the byte order as it exists in NVRAM.
10819 if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
10825 unsigned int pos = 0;
10827 ptr = (u8 *)&buf[0];
10828 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
10829 cnt = pci_read_vpd(tp->pdev, pos,
10831 if (cnt == -ETIMEDOUT || cnt == -EINTR)
10849 #define NVRAM_TEST_SIZE 0x100
10850 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
10851 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
10852 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
10853 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE 0x20
10854 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE 0x24
10855 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE 0x50
10856 #define NVRAM_SELFBOOT_HW_SIZE 0x20
10857 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
10859 static int tg3_test_nvram(struct tg3 *tp)
10861 u32 csum, magic, len;
10863 int i, j, k, err = 0, size;
10865 if (tg3_flag(tp, NO_NVRAM))
10868 if (tg3_nvram_read(tp, 0, &magic) != 0)
10871 if (magic == TG3_EEPROM_MAGIC)
10872 size = NVRAM_TEST_SIZE;
10873 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
10874 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
10875 TG3_EEPROM_SB_FORMAT_1) {
10876 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
10877 case TG3_EEPROM_SB_REVISION_0:
10878 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
10880 case TG3_EEPROM_SB_REVISION_2:
10881 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
10883 case TG3_EEPROM_SB_REVISION_3:
10884 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
10886 case TG3_EEPROM_SB_REVISION_4:
10887 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
10889 case TG3_EEPROM_SB_REVISION_5:
10890 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
10892 case TG3_EEPROM_SB_REVISION_6:
10893 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
10900 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
10901 size = NVRAM_SELFBOOT_HW_SIZE;
10905 buf = kmalloc(size, GFP_KERNEL);
10910 for (i = 0, j = 0; i < size; i += 4, j++) {
10911 err = tg3_nvram_read_be32(tp, i, &buf[j]);
10918 /* Selfboot format */
10919 magic = be32_to_cpu(buf[0]);
10920 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
10921 TG3_EEPROM_MAGIC_FW) {
10922 u8 *buf8 = (u8 *) buf, csum8 = 0;
10924 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
10925 TG3_EEPROM_SB_REVISION_2) {
10926 /* For rev 2, the csum doesn't include the MBA. */
10927 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
10929 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
10932 for (i = 0; i < size; i++)
10945 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
10946 TG3_EEPROM_MAGIC_HW) {
10947 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
10948 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
10949 u8 *buf8 = (u8 *) buf;
10951 /* Separate the parity bits and the data bytes. */
10952 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
10953 if ((i == 0) || (i == 8)) {
10957 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
10958 parity[k++] = buf8[i] & msk;
10960 } else if (i == 16) {
10964 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
10965 parity[k++] = buf8[i] & msk;
10968 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
10969 parity[k++] = buf8[i] & msk;
10972 data[j++] = buf8[i];
10976 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
10977 u8 hw8 = hweight8(data[i]);
10979 if ((hw8 & 0x1) && parity[i])
10981 else if (!(hw8 & 0x1) && !parity[i])
10990 /* Bootstrap checksum at offset 0x10 */
10991 csum = calc_crc((unsigned char *) buf, 0x10);
10992 if (csum != le32_to_cpu(buf[0x10/4]))
10995 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
10996 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
10997 if (csum != le32_to_cpu(buf[0xfc/4]))
11002 buf = tg3_vpd_readblock(tp, &len);
11006 i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
11008 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
11012 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
11015 i += PCI_VPD_LRDT_TAG_SIZE;
11016 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
11017 PCI_VPD_RO_KEYWORD_CHKSUM);
11021 j += PCI_VPD_INFO_FLD_HDR_SIZE;
11023 for (i = 0; i <= j; i++)
11024 csum8 += ((u8 *)buf)[i];
11038 #define TG3_SERDES_TIMEOUT_SEC 2
11039 #define TG3_COPPER_TIMEOUT_SEC 6
11041 static int tg3_test_link(struct tg3 *tp)
11045 if (!netif_running(tp->dev))
11048 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
11049 max = TG3_SERDES_TIMEOUT_SEC;
11051 max = TG3_COPPER_TIMEOUT_SEC;
11053 for (i = 0; i < max; i++) {
11054 if (netif_carrier_ok(tp->dev))
11057 if (msleep_interruptible(1000))
11064 /* Only test the commonly used registers */
11065 static int tg3_test_registers(struct tg3 *tp)
11067 int i, is_5705, is_5750;
11068 u32 offset, read_mask, write_mask, val, save_val, read_val;
11072 #define TG3_FL_5705 0x1
11073 #define TG3_FL_NOT_5705 0x2
11074 #define TG3_FL_NOT_5788 0x4
11075 #define TG3_FL_NOT_5750 0x8
11079 /* MAC Control Registers */
11080 { MAC_MODE, TG3_FL_NOT_5705,
11081 0x00000000, 0x00ef6f8c },
11082 { MAC_MODE, TG3_FL_5705,
11083 0x00000000, 0x01ef6b8c },
11084 { MAC_STATUS, TG3_FL_NOT_5705,
11085 0x03800107, 0x00000000 },
11086 { MAC_STATUS, TG3_FL_5705,
11087 0x03800100, 0x00000000 },
11088 { MAC_ADDR_0_HIGH, 0x0000,
11089 0x00000000, 0x0000ffff },
11090 { MAC_ADDR_0_LOW, 0x0000,
11091 0x00000000, 0xffffffff },
11092 { MAC_RX_MTU_SIZE, 0x0000,
11093 0x00000000, 0x0000ffff },
11094 { MAC_TX_MODE, 0x0000,
11095 0x00000000, 0x00000070 },
11096 { MAC_TX_LENGTHS, 0x0000,
11097 0x00000000, 0x00003fff },
11098 { MAC_RX_MODE, TG3_FL_NOT_5705,
11099 0x00000000, 0x000007fc },
11100 { MAC_RX_MODE, TG3_FL_5705,
11101 0x00000000, 0x000007dc },
11102 { MAC_HASH_REG_0, 0x0000,
11103 0x00000000, 0xffffffff },
11104 { MAC_HASH_REG_1, 0x0000,
11105 0x00000000, 0xffffffff },
11106 { MAC_HASH_REG_2, 0x0000,
11107 0x00000000, 0xffffffff },
11108 { MAC_HASH_REG_3, 0x0000,
11109 0x00000000, 0xffffffff },
11111 /* Receive Data and Receive BD Initiator Control Registers. */
11112 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
11113 0x00000000, 0xffffffff },
11114 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
11115 0x00000000, 0xffffffff },
11116 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
11117 0x00000000, 0x00000003 },
11118 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
11119 0x00000000, 0xffffffff },
11120 { RCVDBDI_STD_BD+0, 0x0000,
11121 0x00000000, 0xffffffff },
11122 { RCVDBDI_STD_BD+4, 0x0000,
11123 0x00000000, 0xffffffff },
11124 { RCVDBDI_STD_BD+8, 0x0000,
11125 0x00000000, 0xffff0002 },
11126 { RCVDBDI_STD_BD+0xc, 0x0000,
11127 0x00000000, 0xffffffff },
11129 /* Receive BD Initiator Control Registers. */
11130 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
11131 0x00000000, 0xffffffff },
11132 { RCVBDI_STD_THRESH, TG3_FL_5705,
11133 0x00000000, 0x000003ff },
11134 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
11135 0x00000000, 0xffffffff },
11137 /* Host Coalescing Control Registers. */
11138 { HOSTCC_MODE, TG3_FL_NOT_5705,
11139 0x00000000, 0x00000004 },
11140 { HOSTCC_MODE, TG3_FL_5705,
11141 0x00000000, 0x000000f6 },
11142 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
11143 0x00000000, 0xffffffff },
11144 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
11145 0x00000000, 0x000003ff },
11146 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
11147 0x00000000, 0xffffffff },
11148 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
11149 0x00000000, 0x000003ff },
11150 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
11151 0x00000000, 0xffffffff },
11152 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
11153 0x00000000, 0x000000ff },
11154 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
11155 0x00000000, 0xffffffff },
11156 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
11157 0x00000000, 0x000000ff },
11158 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
11159 0x00000000, 0xffffffff },
11160 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
11161 0x00000000, 0xffffffff },
11162 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
11163 0x00000000, 0xffffffff },
11164 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
11165 0x00000000, 0x000000ff },
11166 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
11167 0x00000000, 0xffffffff },
11168 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
11169 0x00000000, 0x000000ff },
11170 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
11171 0x00000000, 0xffffffff },
11172 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
11173 0x00000000, 0xffffffff },
11174 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
11175 0x00000000, 0xffffffff },
11176 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
11177 0x00000000, 0xffffffff },
11178 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
11179 0x00000000, 0xffffffff },
11180 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
11181 0xffffffff, 0x00000000 },
11182 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
11183 0xffffffff, 0x00000000 },
11185 /* Buffer Manager Control Registers. */
11186 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
11187 0x00000000, 0x007fff80 },
11188 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
11189 0x00000000, 0x007fffff },
11190 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
11191 0x00000000, 0x0000003f },
11192 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
11193 0x00000000, 0x000001ff },
11194 { BUFMGR_MB_HIGH_WATER, 0x0000,
11195 0x00000000, 0x000001ff },
11196 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
11197 0xffffffff, 0x00000000 },
11198 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
11199 0xffffffff, 0x00000000 },
11201 /* Mailbox Registers */
11202 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
11203 0x00000000, 0x000001ff },
11204 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
11205 0x00000000, 0x000001ff },
11206 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
11207 0x00000000, 0x000007ff },
11208 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
11209 0x00000000, 0x000001ff },
11211 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
11214 is_5705 = is_5750 = 0;
11215 if (tg3_flag(tp, 5705_PLUS)) {
11217 if (tg3_flag(tp, 5750_PLUS))
11221 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
11222 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
11225 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
11228 if (tg3_flag(tp, IS_5788) &&
11229 (reg_tbl[i].flags & TG3_FL_NOT_5788))
11232 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
11235 offset = (u32) reg_tbl[i].offset;
11236 read_mask = reg_tbl[i].read_mask;
11237 write_mask = reg_tbl[i].write_mask;
11239 /* Save the original register content */
11240 save_val = tr32(offset);
11242 /* Determine the read-only value. */
11243 read_val = save_val & read_mask;
11245 /* Write zero to the register, then make sure the read-only bits
11246 * are not changed and the read/write bits are all zeros.
11250 val = tr32(offset);
11252 /* Test the read-only and read/write bits. */
11253 if (((val & read_mask) != read_val) || (val & write_mask))
11256 /* Write ones to all the bits defined by RdMask and WrMask, then
11257 * make sure the read-only bits are not changed and the
11258 * read/write bits are all ones.
11260 tw32(offset, read_mask | write_mask);
11262 val = tr32(offset);
11264 /* Test the read-only bits. */
11265 if ((val & read_mask) != read_val)
11268 /* Test the read/write bits. */
11269 if ((val & write_mask) != write_mask)
11272 tw32(offset, save_val);
11278 if (netif_msg_hw(tp))
11279 netdev_err(tp->dev,
11280 "Register test failed at offset %x\n", offset);
11281 tw32(offset, save_val);
11285 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
11287 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
11291 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
11292 for (j = 0; j < len; j += 4) {
11295 tg3_write_mem(tp, offset + j, test_pattern[i]);
11296 tg3_read_mem(tp, offset + j, &val);
11297 if (val != test_pattern[i])
11304 static int tg3_test_memory(struct tg3 *tp)
11306 static struct mem_entry {
11309 } mem_tbl_570x[] = {
11310 { 0x00000000, 0x00b50},
11311 { 0x00002000, 0x1c000},
11312 { 0xffffffff, 0x00000}
11313 }, mem_tbl_5705[] = {
11314 { 0x00000100, 0x0000c},
11315 { 0x00000200, 0x00008},
11316 { 0x00004000, 0x00800},
11317 { 0x00006000, 0x01000},
11318 { 0x00008000, 0x02000},
11319 { 0x00010000, 0x0e000},
11320 { 0xffffffff, 0x00000}
11321 }, mem_tbl_5755[] = {
11322 { 0x00000200, 0x00008},
11323 { 0x00004000, 0x00800},
11324 { 0x00006000, 0x00800},
11325 { 0x00008000, 0x02000},
11326 { 0x00010000, 0x0c000},
11327 { 0xffffffff, 0x00000}
11328 }, mem_tbl_5906[] = {
11329 { 0x00000200, 0x00008},
11330 { 0x00004000, 0x00400},
11331 { 0x00006000, 0x00400},
11332 { 0x00008000, 0x01000},
11333 { 0x00010000, 0x01000},
11334 { 0xffffffff, 0x00000}
11335 }, mem_tbl_5717[] = {
11336 { 0x00000200, 0x00008},
11337 { 0x00010000, 0x0a000},
11338 { 0x00020000, 0x13c00},
11339 { 0xffffffff, 0x00000}
11340 }, mem_tbl_57765[] = {
11341 { 0x00000200, 0x00008},
11342 { 0x00004000, 0x00800},
11343 { 0x00006000, 0x09800},
11344 { 0x00010000, 0x0a000},
11345 { 0xffffffff, 0x00000}
11347 struct mem_entry *mem_tbl;
11351 if (tg3_flag(tp, 5717_PLUS))
11352 mem_tbl = mem_tbl_5717;
11353 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
11354 mem_tbl = mem_tbl_57765;
11355 else if (tg3_flag(tp, 5755_PLUS))
11356 mem_tbl = mem_tbl_5755;
11357 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11358 mem_tbl = mem_tbl_5906;
11359 else if (tg3_flag(tp, 5705_PLUS))
11360 mem_tbl = mem_tbl_5705;
11362 mem_tbl = mem_tbl_570x;
11364 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
11365 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
11373 #define TG3_TSO_MSS 500
11375 #define TG3_TSO_IP_HDR_LEN 20
11376 #define TG3_TSO_TCP_HDR_LEN 20
11377 #define TG3_TSO_TCP_OPT_LEN 12
11379 static const u8 tg3_tso_header[] = {
11381 0x45, 0x00, 0x00, 0x00,
11382 0x00, 0x00, 0x40, 0x00,
11383 0x40, 0x06, 0x00, 0x00,
11384 0x0a, 0x00, 0x00, 0x01,
11385 0x0a, 0x00, 0x00, 0x02,
11386 0x0d, 0x00, 0xe0, 0x00,
11387 0x00, 0x00, 0x01, 0x00,
11388 0x00, 0x00, 0x02, 0x00,
11389 0x80, 0x10, 0x10, 0x00,
11390 0x14, 0x09, 0x00, 0x00,
11391 0x01, 0x01, 0x08, 0x0a,
11392 0x11, 0x11, 0x11, 0x11,
11393 0x11, 0x11, 0x11, 0x11,
11396 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
11398 u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
11399 u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
11401 struct sk_buff *skb, *rx_skb;
11404 int num_pkts, tx_len, rx_len, i, err;
11405 struct tg3_rx_buffer_desc *desc;
11406 struct tg3_napi *tnapi, *rnapi;
11407 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
11409 tnapi = &tp->napi[0];
11410 rnapi = &tp->napi[0];
11411 if (tp->irq_cnt > 1) {
11412 if (tg3_flag(tp, ENABLE_RSS))
11413 rnapi = &tp->napi[1];
11414 if (tg3_flag(tp, ENABLE_TSS))
11415 tnapi = &tp->napi[1];
11417 coal_now = tnapi->coal_now | rnapi->coal_now;
11422 skb = netdev_alloc_skb(tp->dev, tx_len);
11426 tx_data = skb_put(skb, tx_len);
11427 memcpy(tx_data, tp->dev->dev_addr, 6);
11428 memset(tx_data + 6, 0x0, 8);
11430 tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
11432 if (tso_loopback) {
11433 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
11435 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
11436 TG3_TSO_TCP_OPT_LEN;
11438 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
11439 sizeof(tg3_tso_header));
11442 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
11443 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
11445 /* Set the total length field in the IP header */
11446 iph->tot_len = htons((u16)(mss + hdr_len));
11448 base_flags = (TXD_FLAG_CPU_PRE_DMA |
11449 TXD_FLAG_CPU_POST_DMA);
11451 if (tg3_flag(tp, HW_TSO_1) ||
11452 tg3_flag(tp, HW_TSO_2) ||
11453 tg3_flag(tp, HW_TSO_3)) {
11455 val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
11456 th = (struct tcphdr *)&tx_data[val];
11459 base_flags |= TXD_FLAG_TCPUDP_CSUM;
11461 if (tg3_flag(tp, HW_TSO_3)) {
11462 mss |= (hdr_len & 0xc) << 12;
11463 if (hdr_len & 0x10)
11464 base_flags |= 0x00000010;
11465 base_flags |= (hdr_len & 0x3e0) << 5;
11466 } else if (tg3_flag(tp, HW_TSO_2))
11467 mss |= hdr_len << 9;
11468 else if (tg3_flag(tp, HW_TSO_1) ||
11469 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
11470 mss |= (TG3_TSO_TCP_OPT_LEN << 9);
11472 base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
11475 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
11478 data_off = ETH_HLEN;
11481 for (i = data_off; i < tx_len; i++)
11482 tx_data[i] = (u8) (i & 0xff);
11484 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
11485 if (pci_dma_mapping_error(tp->pdev, map)) {
11486 dev_kfree_skb(skb);
11490 val = tnapi->tx_prod;
11491 tnapi->tx_buffers[val].skb = skb;
11492 dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
11494 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11499 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
11501 budget = tg3_tx_avail(tnapi);
11502 if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
11503 base_flags | TXD_FLAG_END, mss, 0)) {
11504 tnapi->tx_buffers[val].skb = NULL;
11505 dev_kfree_skb(skb);
11511 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
11512 tr32_mailbox(tnapi->prodmbox);
11516 /* 350 usec to allow enough time on some 10/100 Mbps devices. */
11517 for (i = 0; i < 35; i++) {
11518 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11523 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
11524 rx_idx = rnapi->hw_status->idx[0].rx_producer;
11525 if ((tx_idx == tnapi->tx_prod) &&
11526 (rx_idx == (rx_start_idx + num_pkts)))
11530 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, 0);
11531 dev_kfree_skb(skb);
11533 if (tx_idx != tnapi->tx_prod)
11536 if (rx_idx != rx_start_idx + num_pkts)
11540 while (rx_idx != rx_start_idx) {
11541 desc = &rnapi->rx_rcb[rx_start_idx++];
11542 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
11543 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
11545 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
11546 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
11549 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
11552 if (!tso_loopback) {
11553 if (rx_len != tx_len)
11556 if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
11557 if (opaque_key != RXD_OPAQUE_RING_STD)
11560 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
11563 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
11564 (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
11565 >> RXD_TCPCSUM_SHIFT != 0xffff) {
11569 if (opaque_key == RXD_OPAQUE_RING_STD) {
11570 rx_skb = tpr->rx_std_buffers[desc_idx].skb;
11571 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
11573 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
11574 rx_skb = tpr->rx_jmb_buffers[desc_idx].skb;
11575 map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
11580 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
11581 PCI_DMA_FROMDEVICE);
11583 for (i = data_off; i < rx_len; i++, val++) {
11584 if (*(rx_skb->data + i) != (u8) (val & 0xff))
11591 /* tg3_free_rings will unmap and free the rx_skb */
11596 #define TG3_STD_LOOPBACK_FAILED 1
11597 #define TG3_JMB_LOOPBACK_FAILED 2
11598 #define TG3_TSO_LOOPBACK_FAILED 4
11599 #define TG3_LOOPBACK_FAILED \
11600 (TG3_STD_LOOPBACK_FAILED | \
11601 TG3_JMB_LOOPBACK_FAILED | \
11602 TG3_TSO_LOOPBACK_FAILED)
11604 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
11609 eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
11610 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11612 if (!netif_running(tp->dev)) {
11613 data[0] = TG3_LOOPBACK_FAILED;
11614 data[1] = TG3_LOOPBACK_FAILED;
11616 data[2] = TG3_LOOPBACK_FAILED;
11620 err = tg3_reset_hw(tp, 1);
11622 data[0] = TG3_LOOPBACK_FAILED;
11623 data[1] = TG3_LOOPBACK_FAILED;
11625 data[2] = TG3_LOOPBACK_FAILED;
11629 if (tg3_flag(tp, ENABLE_RSS)) {
11632 /* Reroute all rx packets to the 1st queue */
11633 for (i = MAC_RSS_INDIR_TBL_0;
11634 i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
11638 /* HW errata - mac loopback fails in some cases on 5780.
11639 * Normal traffic and PHY loopback are not affected by
11640 * errata. Also, the MAC loopback test is deprecated for
11641 * all newer ASIC revisions.
11643 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
11644 !tg3_flag(tp, CPMU_PRESENT)) {
11645 tg3_mac_loopback(tp, true);
11647 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
11648 data[0] |= TG3_STD_LOOPBACK_FAILED;
11650 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11651 tg3_run_loopback(tp, 9000 + ETH_HLEN, false))
11652 data[0] |= TG3_JMB_LOOPBACK_FAILED;
11654 tg3_mac_loopback(tp, false);
11657 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11658 !tg3_flag(tp, USE_PHYLIB)) {
11661 tg3_phy_lpbk_set(tp, 0, false);
11663 /* Wait for link */
11664 for (i = 0; i < 100; i++) {
11665 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
11670 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
11671 data[1] |= TG3_STD_LOOPBACK_FAILED;
11672 if (tg3_flag(tp, TSO_CAPABLE) &&
11673 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
11674 data[1] |= TG3_TSO_LOOPBACK_FAILED;
11675 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11676 tg3_run_loopback(tp, 9000 + ETH_HLEN, false))
11677 data[1] |= TG3_JMB_LOOPBACK_FAILED;
11680 tg3_phy_lpbk_set(tp, 0, true);
11682 /* All link indications report up, but the hardware
11683 * isn't really ready for about 20 msec. Double it
11688 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
11689 data[2] |= TG3_STD_LOOPBACK_FAILED;
11690 if (tg3_flag(tp, TSO_CAPABLE) &&
11691 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
11692 data[2] |= TG3_TSO_LOOPBACK_FAILED;
11693 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11694 tg3_run_loopback(tp, 9000 + ETH_HLEN, false))
11695 data[2] |= TG3_JMB_LOOPBACK_FAILED;
11698 /* Re-enable gphy autopowerdown. */
11699 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
11700 tg3_phy_toggle_apd(tp, true);
11703 err = (data[0] | data[1] | data[2]) ? -EIO : 0;
11706 tp->phy_flags |= eee_cap;
11711 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
11714 struct tg3 *tp = netdev_priv(dev);
11715 bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
11717 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
11718 tg3_power_up(tp)) {
11719 etest->flags |= ETH_TEST_FL_FAILED;
11720 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
11724 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
11726 if (tg3_test_nvram(tp) != 0) {
11727 etest->flags |= ETH_TEST_FL_FAILED;
11730 if (!doextlpbk && tg3_test_link(tp)) {
11731 etest->flags |= ETH_TEST_FL_FAILED;
11734 if (etest->flags & ETH_TEST_FL_OFFLINE) {
11735 int err, err2 = 0, irq_sync = 0;
11737 if (netif_running(dev)) {
11739 tg3_netif_stop(tp);
11743 tg3_full_lock(tp, irq_sync);
11745 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
11746 err = tg3_nvram_lock(tp);
11747 tg3_halt_cpu(tp, RX_CPU_BASE);
11748 if (!tg3_flag(tp, 5705_PLUS))
11749 tg3_halt_cpu(tp, TX_CPU_BASE);
11751 tg3_nvram_unlock(tp);
11753 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
11756 if (tg3_test_registers(tp) != 0) {
11757 etest->flags |= ETH_TEST_FL_FAILED;
11761 if (tg3_test_memory(tp) != 0) {
11762 etest->flags |= ETH_TEST_FL_FAILED;
11767 etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
11769 if (tg3_test_loopback(tp, &data[4], doextlpbk))
11770 etest->flags |= ETH_TEST_FL_FAILED;
11772 tg3_full_unlock(tp);
11774 if (tg3_test_interrupt(tp) != 0) {
11775 etest->flags |= ETH_TEST_FL_FAILED;
11779 tg3_full_lock(tp, 0);
11781 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11782 if (netif_running(dev)) {
11783 tg3_flag_set(tp, INIT_COMPLETE);
11784 err2 = tg3_restart_hw(tp, 1);
11786 tg3_netif_start(tp);
11789 tg3_full_unlock(tp);
11791 if (irq_sync && !err2)
11794 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11795 tg3_power_down(tp);
11799 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11801 struct mii_ioctl_data *data = if_mii(ifr);
11802 struct tg3 *tp = netdev_priv(dev);
11805 if (tg3_flag(tp, USE_PHYLIB)) {
11806 struct phy_device *phydev;
11807 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11809 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11810 return phy_mii_ioctl(phydev, ifr, cmd);
11815 data->phy_id = tp->phy_addr;
11818 case SIOCGMIIREG: {
11821 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11822 break; /* We have no PHY */
11824 if (!netif_running(dev))
11827 spin_lock_bh(&tp->lock);
11828 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
11829 spin_unlock_bh(&tp->lock);
11831 data->val_out = mii_regval;
11837 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11838 break; /* We have no PHY */
11840 if (!netif_running(dev))
11843 spin_lock_bh(&tp->lock);
11844 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
11845 spin_unlock_bh(&tp->lock);
11853 return -EOPNOTSUPP;
11856 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11858 struct tg3 *tp = netdev_priv(dev);
11860 memcpy(ec, &tp->coal, sizeof(*ec));
11864 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11866 struct tg3 *tp = netdev_priv(dev);
11867 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
11868 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
11870 if (!tg3_flag(tp, 5705_PLUS)) {
11871 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
11872 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
11873 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
11874 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
11877 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
11878 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
11879 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
11880 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
11881 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
11882 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
11883 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
11884 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
11885 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
11886 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
11889 /* No rx interrupts will be generated if both are zero */
11890 if ((ec->rx_coalesce_usecs == 0) &&
11891 (ec->rx_max_coalesced_frames == 0))
11894 /* No tx interrupts will be generated if both are zero */
11895 if ((ec->tx_coalesce_usecs == 0) &&
11896 (ec->tx_max_coalesced_frames == 0))
11899 /* Only copy relevant parameters, ignore all others. */
11900 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
11901 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
11902 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
11903 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
11904 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
11905 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
11906 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
11907 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
11908 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
11910 if (netif_running(dev)) {
11911 tg3_full_lock(tp, 0);
11912 __tg3_set_coalesce(tp, &tp->coal);
11913 tg3_full_unlock(tp);
11918 static const struct ethtool_ops tg3_ethtool_ops = {
11919 .get_settings = tg3_get_settings,
11920 .set_settings = tg3_set_settings,
11921 .get_drvinfo = tg3_get_drvinfo,
11922 .get_regs_len = tg3_get_regs_len,
11923 .get_regs = tg3_get_regs,
11924 .get_wol = tg3_get_wol,
11925 .set_wol = tg3_set_wol,
11926 .get_msglevel = tg3_get_msglevel,
11927 .set_msglevel = tg3_set_msglevel,
11928 .nway_reset = tg3_nway_reset,
11929 .get_link = ethtool_op_get_link,
11930 .get_eeprom_len = tg3_get_eeprom_len,
11931 .get_eeprom = tg3_get_eeprom,
11932 .set_eeprom = tg3_set_eeprom,
11933 .get_ringparam = tg3_get_ringparam,
11934 .set_ringparam = tg3_set_ringparam,
11935 .get_pauseparam = tg3_get_pauseparam,
11936 .set_pauseparam = tg3_set_pauseparam,
11937 .self_test = tg3_self_test,
11938 .get_strings = tg3_get_strings,
11939 .set_phys_id = tg3_set_phys_id,
11940 .get_ethtool_stats = tg3_get_ethtool_stats,
11941 .get_coalesce = tg3_get_coalesce,
11942 .set_coalesce = tg3_set_coalesce,
11943 .get_sset_count = tg3_get_sset_count,
11946 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
11948 u32 cursize, val, magic;
11950 tp->nvram_size = EEPROM_CHIP_SIZE;
11952 if (tg3_nvram_read(tp, 0, &magic) != 0)
11955 if ((magic != TG3_EEPROM_MAGIC) &&
11956 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
11957 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
11961 * Size the chip by reading offsets at increasing powers of two.
11962 * When we encounter our validation signature, we know the addressing
11963 * has wrapped around, and thus have our chip size.
11967 while (cursize < tp->nvram_size) {
11968 if (tg3_nvram_read(tp, cursize, &val) != 0)
11977 tp->nvram_size = cursize;
11980 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
11984 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
11987 /* Selfboot format */
11988 if (val != TG3_EEPROM_MAGIC) {
11989 tg3_get_eeprom_size(tp);
11993 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
11995 /* This is confusing. We want to operate on the
11996 * 16-bit value at offset 0xf2. The tg3_nvram_read()
11997 * call will read from NVRAM and byteswap the data
11998 * according to the byteswapping settings for all
11999 * other register accesses. This ensures the data we
12000 * want will always reside in the lower 16-bits.
12001 * However, the data in NVRAM is in LE format, which
12002 * means the data from the NVRAM read will always be
12003 * opposite the endianness of the CPU. The 16-bit
12004 * byteswap then brings the data to CPU endianness.
12006 tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
12010 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12013 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
12017 nvcfg1 = tr32(NVRAM_CFG1);
12018 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
12019 tg3_flag_set(tp, FLASH);
12021 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12022 tw32(NVRAM_CFG1, nvcfg1);
12025 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
12026 tg3_flag(tp, 5780_CLASS)) {
12027 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
12028 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
12029 tp->nvram_jedecnum = JEDEC_ATMEL;
12030 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
12031 tg3_flag_set(tp, NVRAM_BUFFERED);
12033 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
12034 tp->nvram_jedecnum = JEDEC_ATMEL;
12035 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
12037 case FLASH_VENDOR_ATMEL_EEPROM:
12038 tp->nvram_jedecnum = JEDEC_ATMEL;
12039 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12040 tg3_flag_set(tp, NVRAM_BUFFERED);
12042 case FLASH_VENDOR_ST:
12043 tp->nvram_jedecnum = JEDEC_ST;
12044 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
12045 tg3_flag_set(tp, NVRAM_BUFFERED);
12047 case FLASH_VENDOR_SAIFUN:
12048 tp->nvram_jedecnum = JEDEC_SAIFUN;
12049 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
12051 case FLASH_VENDOR_SST_SMALL:
12052 case FLASH_VENDOR_SST_LARGE:
12053 tp->nvram_jedecnum = JEDEC_SST;
12054 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
12058 tp->nvram_jedecnum = JEDEC_ATMEL;
12059 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
12060 tg3_flag_set(tp, NVRAM_BUFFERED);
12064 static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
12066 switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
12067 case FLASH_5752PAGE_SIZE_256:
12068 tp->nvram_pagesize = 256;
12070 case FLASH_5752PAGE_SIZE_512:
12071 tp->nvram_pagesize = 512;
12073 case FLASH_5752PAGE_SIZE_1K:
12074 tp->nvram_pagesize = 1024;
12076 case FLASH_5752PAGE_SIZE_2K:
12077 tp->nvram_pagesize = 2048;
12079 case FLASH_5752PAGE_SIZE_4K:
12080 tp->nvram_pagesize = 4096;
12082 case FLASH_5752PAGE_SIZE_264:
12083 tp->nvram_pagesize = 264;
12085 case FLASH_5752PAGE_SIZE_528:
12086 tp->nvram_pagesize = 528;
12091 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
12095 nvcfg1 = tr32(NVRAM_CFG1);
12097 /* NVRAM protection for TPM */
12098 if (nvcfg1 & (1 << 27))
12099 tg3_flag_set(tp, PROTECTED_NVRAM);
12101 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12102 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
12103 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
12104 tp->nvram_jedecnum = JEDEC_ATMEL;
12105 tg3_flag_set(tp, NVRAM_BUFFERED);
12107 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12108 tp->nvram_jedecnum = JEDEC_ATMEL;
12109 tg3_flag_set(tp, NVRAM_BUFFERED);
12110 tg3_flag_set(tp, FLASH);
12112 case FLASH_5752VENDOR_ST_M45PE10:
12113 case FLASH_5752VENDOR_ST_M45PE20:
12114 case FLASH_5752VENDOR_ST_M45PE40:
12115 tp->nvram_jedecnum = JEDEC_ST;
12116 tg3_flag_set(tp, NVRAM_BUFFERED);
12117 tg3_flag_set(tp, FLASH);
12121 if (tg3_flag(tp, FLASH)) {
12122 tg3_nvram_get_pagesize(tp, nvcfg1);
12124 /* For eeprom, set pagesize to maximum eeprom size */
12125 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12127 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12128 tw32(NVRAM_CFG1, nvcfg1);
12132 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
12134 u32 nvcfg1, protect = 0;
12136 nvcfg1 = tr32(NVRAM_CFG1);
12138 /* NVRAM protection for TPM */
12139 if (nvcfg1 & (1 << 27)) {
12140 tg3_flag_set(tp, PROTECTED_NVRAM);
12144 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
12146 case FLASH_5755VENDOR_ATMEL_FLASH_1:
12147 case FLASH_5755VENDOR_ATMEL_FLASH_2:
12148 case FLASH_5755VENDOR_ATMEL_FLASH_3:
12149 case FLASH_5755VENDOR_ATMEL_FLASH_5:
12150 tp->nvram_jedecnum = JEDEC_ATMEL;
12151 tg3_flag_set(tp, NVRAM_BUFFERED);
12152 tg3_flag_set(tp, FLASH);
12153 tp->nvram_pagesize = 264;
12154 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
12155 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
12156 tp->nvram_size = (protect ? 0x3e200 :
12157 TG3_NVRAM_SIZE_512KB);
12158 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
12159 tp->nvram_size = (protect ? 0x1f200 :
12160 TG3_NVRAM_SIZE_256KB);
12162 tp->nvram_size = (protect ? 0x1f200 :
12163 TG3_NVRAM_SIZE_128KB);
12165 case FLASH_5752VENDOR_ST_M45PE10:
12166 case FLASH_5752VENDOR_ST_M45PE20:
12167 case FLASH_5752VENDOR_ST_M45PE40:
12168 tp->nvram_jedecnum = JEDEC_ST;
12169 tg3_flag_set(tp, NVRAM_BUFFERED);
12170 tg3_flag_set(tp, FLASH);
12171 tp->nvram_pagesize = 256;
12172 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
12173 tp->nvram_size = (protect ?
12174 TG3_NVRAM_SIZE_64KB :
12175 TG3_NVRAM_SIZE_128KB);
12176 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
12177 tp->nvram_size = (protect ?
12178 TG3_NVRAM_SIZE_64KB :
12179 TG3_NVRAM_SIZE_256KB);
12181 tp->nvram_size = (protect ?
12182 TG3_NVRAM_SIZE_128KB :
12183 TG3_NVRAM_SIZE_512KB);
12188 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
12192 nvcfg1 = tr32(NVRAM_CFG1);
12194 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12195 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
12196 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
12197 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
12198 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
12199 tp->nvram_jedecnum = JEDEC_ATMEL;
12200 tg3_flag_set(tp, NVRAM_BUFFERED);
12201 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12203 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12204 tw32(NVRAM_CFG1, nvcfg1);
12206 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12207 case FLASH_5755VENDOR_ATMEL_FLASH_1:
12208 case FLASH_5755VENDOR_ATMEL_FLASH_2:
12209 case FLASH_5755VENDOR_ATMEL_FLASH_3:
12210 tp->nvram_jedecnum = JEDEC_ATMEL;
12211 tg3_flag_set(tp, NVRAM_BUFFERED);
12212 tg3_flag_set(tp, FLASH);
12213 tp->nvram_pagesize = 264;
12215 case FLASH_5752VENDOR_ST_M45PE10:
12216 case FLASH_5752VENDOR_ST_M45PE20:
12217 case FLASH_5752VENDOR_ST_M45PE40:
12218 tp->nvram_jedecnum = JEDEC_ST;
12219 tg3_flag_set(tp, NVRAM_BUFFERED);
12220 tg3_flag_set(tp, FLASH);
12221 tp->nvram_pagesize = 256;
12226 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
12228 u32 nvcfg1, protect = 0;
12230 nvcfg1 = tr32(NVRAM_CFG1);
12232 /* NVRAM protection for TPM */
12233 if (nvcfg1 & (1 << 27)) {
12234 tg3_flag_set(tp, PROTECTED_NVRAM);
12238 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
12240 case FLASH_5761VENDOR_ATMEL_ADB021D:
12241 case FLASH_5761VENDOR_ATMEL_ADB041D:
12242 case FLASH_5761VENDOR_ATMEL_ADB081D:
12243 case FLASH_5761VENDOR_ATMEL_ADB161D:
12244 case FLASH_5761VENDOR_ATMEL_MDB021D:
12245 case FLASH_5761VENDOR_ATMEL_MDB041D:
12246 case FLASH_5761VENDOR_ATMEL_MDB081D:
12247 case FLASH_5761VENDOR_ATMEL_MDB161D:
12248 tp->nvram_jedecnum = JEDEC_ATMEL;
12249 tg3_flag_set(tp, NVRAM_BUFFERED);
12250 tg3_flag_set(tp, FLASH);
12251 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12252 tp->nvram_pagesize = 256;
12254 case FLASH_5761VENDOR_ST_A_M45PE20:
12255 case FLASH_5761VENDOR_ST_A_M45PE40:
12256 case FLASH_5761VENDOR_ST_A_M45PE80:
12257 case FLASH_5761VENDOR_ST_A_M45PE16:
12258 case FLASH_5761VENDOR_ST_M_M45PE20:
12259 case FLASH_5761VENDOR_ST_M_M45PE40:
12260 case FLASH_5761VENDOR_ST_M_M45PE80:
12261 case FLASH_5761VENDOR_ST_M_M45PE16:
12262 tp->nvram_jedecnum = JEDEC_ST;
12263 tg3_flag_set(tp, NVRAM_BUFFERED);
12264 tg3_flag_set(tp, FLASH);
12265 tp->nvram_pagesize = 256;
12270 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
12273 case FLASH_5761VENDOR_ATMEL_ADB161D:
12274 case FLASH_5761VENDOR_ATMEL_MDB161D:
12275 case FLASH_5761VENDOR_ST_A_M45PE16:
12276 case FLASH_5761VENDOR_ST_M_M45PE16:
12277 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
12279 case FLASH_5761VENDOR_ATMEL_ADB081D:
12280 case FLASH_5761VENDOR_ATMEL_MDB081D:
12281 case FLASH_5761VENDOR_ST_A_M45PE80:
12282 case FLASH_5761VENDOR_ST_M_M45PE80:
12283 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12285 case FLASH_5761VENDOR_ATMEL_ADB041D:
12286 case FLASH_5761VENDOR_ATMEL_MDB041D:
12287 case FLASH_5761VENDOR_ST_A_M45PE40:
12288 case FLASH_5761VENDOR_ST_M_M45PE40:
12289 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12291 case FLASH_5761VENDOR_ATMEL_ADB021D:
12292 case FLASH_5761VENDOR_ATMEL_MDB021D:
12293 case FLASH_5761VENDOR_ST_A_M45PE20:
12294 case FLASH_5761VENDOR_ST_M_M45PE20:
12295 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12301 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
12303 tp->nvram_jedecnum = JEDEC_ATMEL;
12304 tg3_flag_set(tp, NVRAM_BUFFERED);
12305 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12308 static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
12312 nvcfg1 = tr32(NVRAM_CFG1);
12314 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12315 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
12316 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
12317 tp->nvram_jedecnum = JEDEC_ATMEL;
12318 tg3_flag_set(tp, NVRAM_BUFFERED);
12319 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12321 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12322 tw32(NVRAM_CFG1, nvcfg1);
12324 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12325 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
12326 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
12327 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
12328 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
12329 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12330 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12331 tp->nvram_jedecnum = JEDEC_ATMEL;
12332 tg3_flag_set(tp, NVRAM_BUFFERED);
12333 tg3_flag_set(tp, FLASH);
12335 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12336 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12337 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
12338 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
12339 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12341 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
12342 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
12343 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12345 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12346 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12347 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12351 case FLASH_5752VENDOR_ST_M45PE10:
12352 case FLASH_5752VENDOR_ST_M45PE20:
12353 case FLASH_5752VENDOR_ST_M45PE40:
12354 tp->nvram_jedecnum = JEDEC_ST;
12355 tg3_flag_set(tp, NVRAM_BUFFERED);
12356 tg3_flag_set(tp, FLASH);
12358 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12359 case FLASH_5752VENDOR_ST_M45PE10:
12360 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12362 case FLASH_5752VENDOR_ST_M45PE20:
12363 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12365 case FLASH_5752VENDOR_ST_M45PE40:
12366 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12371 tg3_flag_set(tp, NO_NVRAM);
12375 tg3_nvram_get_pagesize(tp, nvcfg1);
12376 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12377 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12381 static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
12385 nvcfg1 = tr32(NVRAM_CFG1);
12387 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12388 case FLASH_5717VENDOR_ATMEL_EEPROM:
12389 case FLASH_5717VENDOR_MICRO_EEPROM:
12390 tp->nvram_jedecnum = JEDEC_ATMEL;
12391 tg3_flag_set(tp, NVRAM_BUFFERED);
12392 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12394 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12395 tw32(NVRAM_CFG1, nvcfg1);
12397 case FLASH_5717VENDOR_ATMEL_MDB011D:
12398 case FLASH_5717VENDOR_ATMEL_ADB011B:
12399 case FLASH_5717VENDOR_ATMEL_ADB011D:
12400 case FLASH_5717VENDOR_ATMEL_MDB021D:
12401 case FLASH_5717VENDOR_ATMEL_ADB021B:
12402 case FLASH_5717VENDOR_ATMEL_ADB021D:
12403 case FLASH_5717VENDOR_ATMEL_45USPT:
12404 tp->nvram_jedecnum = JEDEC_ATMEL;
12405 tg3_flag_set(tp, NVRAM_BUFFERED);
12406 tg3_flag_set(tp, FLASH);
12408 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12409 case FLASH_5717VENDOR_ATMEL_MDB021D:
12410 /* Detect size with tg3_nvram_get_size() */
12412 case FLASH_5717VENDOR_ATMEL_ADB021B:
12413 case FLASH_5717VENDOR_ATMEL_ADB021D:
12414 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12417 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12421 case FLASH_5717VENDOR_ST_M_M25PE10:
12422 case FLASH_5717VENDOR_ST_A_M25PE10:
12423 case FLASH_5717VENDOR_ST_M_M45PE10:
12424 case FLASH_5717VENDOR_ST_A_M45PE10:
12425 case FLASH_5717VENDOR_ST_M_M25PE20:
12426 case FLASH_5717VENDOR_ST_A_M25PE20:
12427 case FLASH_5717VENDOR_ST_M_M45PE20:
12428 case FLASH_5717VENDOR_ST_A_M45PE20:
12429 case FLASH_5717VENDOR_ST_25USPT:
12430 case FLASH_5717VENDOR_ST_45USPT:
12431 tp->nvram_jedecnum = JEDEC_ST;
12432 tg3_flag_set(tp, NVRAM_BUFFERED);
12433 tg3_flag_set(tp, FLASH);
12435 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12436 case FLASH_5717VENDOR_ST_M_M25PE20:
12437 case FLASH_5717VENDOR_ST_M_M45PE20:
12438 /* Detect size with tg3_nvram_get_size() */
12440 case FLASH_5717VENDOR_ST_A_M25PE20:
12441 case FLASH_5717VENDOR_ST_A_M45PE20:
12442 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12445 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12450 tg3_flag_set(tp, NO_NVRAM);
12454 tg3_nvram_get_pagesize(tp, nvcfg1);
12455 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12456 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12459 static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
12461 u32 nvcfg1, nvmpinstrp;
12463 nvcfg1 = tr32(NVRAM_CFG1);
12464 nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
12466 switch (nvmpinstrp) {
12467 case FLASH_5720_EEPROM_HD:
12468 case FLASH_5720_EEPROM_LD:
12469 tp->nvram_jedecnum = JEDEC_ATMEL;
12470 tg3_flag_set(tp, NVRAM_BUFFERED);
12472 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12473 tw32(NVRAM_CFG1, nvcfg1);
12474 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
12475 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12477 tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
12479 case FLASH_5720VENDOR_M_ATMEL_DB011D:
12480 case FLASH_5720VENDOR_A_ATMEL_DB011B:
12481 case FLASH_5720VENDOR_A_ATMEL_DB011D:
12482 case FLASH_5720VENDOR_M_ATMEL_DB021D:
12483 case FLASH_5720VENDOR_A_ATMEL_DB021B:
12484 case FLASH_5720VENDOR_A_ATMEL_DB021D:
12485 case FLASH_5720VENDOR_M_ATMEL_DB041D:
12486 case FLASH_5720VENDOR_A_ATMEL_DB041B:
12487 case FLASH_5720VENDOR_A_ATMEL_DB041D:
12488 case FLASH_5720VENDOR_M_ATMEL_DB081D:
12489 case FLASH_5720VENDOR_A_ATMEL_DB081D:
12490 case FLASH_5720VENDOR_ATMEL_45USPT:
12491 tp->nvram_jedecnum = JEDEC_ATMEL;
12492 tg3_flag_set(tp, NVRAM_BUFFERED);
12493 tg3_flag_set(tp, FLASH);
12495 switch (nvmpinstrp) {
12496 case FLASH_5720VENDOR_M_ATMEL_DB021D:
12497 case FLASH_5720VENDOR_A_ATMEL_DB021B:
12498 case FLASH_5720VENDOR_A_ATMEL_DB021D:
12499 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12501 case FLASH_5720VENDOR_M_ATMEL_DB041D:
12502 case FLASH_5720VENDOR_A_ATMEL_DB041B:
12503 case FLASH_5720VENDOR_A_ATMEL_DB041D:
12504 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12506 case FLASH_5720VENDOR_M_ATMEL_DB081D:
12507 case FLASH_5720VENDOR_A_ATMEL_DB081D:
12508 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12511 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12515 case FLASH_5720VENDOR_M_ST_M25PE10:
12516 case FLASH_5720VENDOR_M_ST_M45PE10:
12517 case FLASH_5720VENDOR_A_ST_M25PE10:
12518 case FLASH_5720VENDOR_A_ST_M45PE10:
12519 case FLASH_5720VENDOR_M_ST_M25PE20:
12520 case FLASH_5720VENDOR_M_ST_M45PE20:
12521 case FLASH_5720VENDOR_A_ST_M25PE20:
12522 case FLASH_5720VENDOR_A_ST_M45PE20:
12523 case FLASH_5720VENDOR_M_ST_M25PE40:
12524 case FLASH_5720VENDOR_M_ST_M45PE40:
12525 case FLASH_5720VENDOR_A_ST_M25PE40:
12526 case FLASH_5720VENDOR_A_ST_M45PE40:
12527 case FLASH_5720VENDOR_M_ST_M25PE80:
12528 case FLASH_5720VENDOR_M_ST_M45PE80:
12529 case FLASH_5720VENDOR_A_ST_M25PE80:
12530 case FLASH_5720VENDOR_A_ST_M45PE80:
12531 case FLASH_5720VENDOR_ST_25USPT:
12532 case FLASH_5720VENDOR_ST_45USPT:
12533 tp->nvram_jedecnum = JEDEC_ST;
12534 tg3_flag_set(tp, NVRAM_BUFFERED);
12535 tg3_flag_set(tp, FLASH);
12537 switch (nvmpinstrp) {
12538 case FLASH_5720VENDOR_M_ST_M25PE20:
12539 case FLASH_5720VENDOR_M_ST_M45PE20:
12540 case FLASH_5720VENDOR_A_ST_M25PE20:
12541 case FLASH_5720VENDOR_A_ST_M45PE20:
12542 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12544 case FLASH_5720VENDOR_M_ST_M25PE40:
12545 case FLASH_5720VENDOR_M_ST_M45PE40:
12546 case FLASH_5720VENDOR_A_ST_M25PE40:
12547 case FLASH_5720VENDOR_A_ST_M45PE40:
12548 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12550 case FLASH_5720VENDOR_M_ST_M25PE80:
12551 case FLASH_5720VENDOR_M_ST_M45PE80:
12552 case FLASH_5720VENDOR_A_ST_M25PE80:
12553 case FLASH_5720VENDOR_A_ST_M45PE80:
12554 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12557 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12562 tg3_flag_set(tp, NO_NVRAM);
12566 tg3_nvram_get_pagesize(tp, nvcfg1);
12567 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12568 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12571 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
12572 static void __devinit tg3_nvram_init(struct tg3 *tp)
12574 tw32_f(GRC_EEPROM_ADDR,
12575 (EEPROM_ADDR_FSM_RESET |
12576 (EEPROM_DEFAULT_CLOCK_PERIOD <<
12577 EEPROM_ADDR_CLKPERD_SHIFT)));
12581 /* Enable seeprom accesses. */
12582 tw32_f(GRC_LOCAL_CTRL,
12583 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
12586 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12587 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
12588 tg3_flag_set(tp, NVRAM);
12590 if (tg3_nvram_lock(tp)) {
12591 netdev_warn(tp->dev,
12592 "Cannot get nvram lock, %s failed\n",
12596 tg3_enable_nvram_access(tp);
12598 tp->nvram_size = 0;
12600 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
12601 tg3_get_5752_nvram_info(tp);
12602 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12603 tg3_get_5755_nvram_info(tp);
12604 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12605 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12606 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12607 tg3_get_5787_nvram_info(tp);
12608 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
12609 tg3_get_5761_nvram_info(tp);
12610 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12611 tg3_get_5906_nvram_info(tp);
12612 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
12613 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
12614 tg3_get_57780_nvram_info(tp);
12615 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
12616 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
12617 tg3_get_5717_nvram_info(tp);
12618 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
12619 tg3_get_5720_nvram_info(tp);
12621 tg3_get_nvram_info(tp);
12623 if (tp->nvram_size == 0)
12624 tg3_get_nvram_size(tp);
12626 tg3_disable_nvram_access(tp);
12627 tg3_nvram_unlock(tp);
12630 tg3_flag_clear(tp, NVRAM);
12631 tg3_flag_clear(tp, NVRAM_BUFFERED);
12633 tg3_get_eeprom_size(tp);
12637 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
12638 u32 offset, u32 len, u8 *buf)
12643 for (i = 0; i < len; i += 4) {
12649 memcpy(&data, buf + i, 4);
12652 * The SEEPROM interface expects the data to always be opposite
12653 * the native endian format. We accomplish this by reversing
12654 * all the operations that would have been performed on the
12655 * data from a call to tg3_nvram_read_be32().
12657 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
12659 val = tr32(GRC_EEPROM_ADDR);
12660 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
12662 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
12664 tw32(GRC_EEPROM_ADDR, val |
12665 (0 << EEPROM_ADDR_DEVID_SHIFT) |
12666 (addr & EEPROM_ADDR_ADDR_MASK) |
12667 EEPROM_ADDR_START |
12668 EEPROM_ADDR_WRITE);
12670 for (j = 0; j < 1000; j++) {
12671 val = tr32(GRC_EEPROM_ADDR);
12673 if (val & EEPROM_ADDR_COMPLETE)
12677 if (!(val & EEPROM_ADDR_COMPLETE)) {
12686 /* offset and length are dword aligned */
12687 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
12691 u32 pagesize = tp->nvram_pagesize;
12692 u32 pagemask = pagesize - 1;
12696 tmp = kmalloc(pagesize, GFP_KERNEL);
12702 u32 phy_addr, page_off, size;
12704 phy_addr = offset & ~pagemask;
12706 for (j = 0; j < pagesize; j += 4) {
12707 ret = tg3_nvram_read_be32(tp, phy_addr + j,
12708 (__be32 *) (tmp + j));
12715 page_off = offset & pagemask;
12722 memcpy(tmp + page_off, buf, size);
12724 offset = offset + (pagesize - page_off);
12726 tg3_enable_nvram_access(tp);
12729 * Before we can erase the flash page, we need
12730 * to issue a special "write enable" command.
12732 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12734 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12737 /* Erase the target page */
12738 tw32(NVRAM_ADDR, phy_addr);
12740 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
12741 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
12743 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12746 /* Issue another write enable to start the write. */
12747 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12749 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12752 for (j = 0; j < pagesize; j += 4) {
12755 data = *((__be32 *) (tmp + j));
12757 tw32(NVRAM_WRDATA, be32_to_cpu(data));
12759 tw32(NVRAM_ADDR, phy_addr + j);
12761 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
12765 nvram_cmd |= NVRAM_CMD_FIRST;
12766 else if (j == (pagesize - 4))
12767 nvram_cmd |= NVRAM_CMD_LAST;
12769 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
12776 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12777 tg3_nvram_exec_cmd(tp, nvram_cmd);
12784 /* offset and length are dword aligned */
12785 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
12790 for (i = 0; i < len; i += 4, offset += 4) {
12791 u32 page_off, phy_addr, nvram_cmd;
12794 memcpy(&data, buf + i, 4);
12795 tw32(NVRAM_WRDATA, be32_to_cpu(data));
12797 page_off = offset % tp->nvram_pagesize;
12799 phy_addr = tg3_nvram_phys_addr(tp, offset);
12801 tw32(NVRAM_ADDR, phy_addr);
12803 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
12805 if (page_off == 0 || i == 0)
12806 nvram_cmd |= NVRAM_CMD_FIRST;
12807 if (page_off == (tp->nvram_pagesize - 4))
12808 nvram_cmd |= NVRAM_CMD_LAST;
12810 if (i == (len - 4))
12811 nvram_cmd |= NVRAM_CMD_LAST;
12813 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
12814 !tg3_flag(tp, 5755_PLUS) &&
12815 (tp->nvram_jedecnum == JEDEC_ST) &&
12816 (nvram_cmd & NVRAM_CMD_FIRST)) {
12818 if ((ret = tg3_nvram_exec_cmd(tp,
12819 NVRAM_CMD_WREN | NVRAM_CMD_GO |
12824 if (!tg3_flag(tp, FLASH)) {
12825 /* We always do complete word writes to eeprom. */
12826 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
12829 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
12835 /* offset and length are dword aligned */
12836 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
12840 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
12841 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
12842 ~GRC_LCLCTRL_GPIO_OUTPUT1);
12846 if (!tg3_flag(tp, NVRAM)) {
12847 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
12851 ret = tg3_nvram_lock(tp);
12855 tg3_enable_nvram_access(tp);
12856 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
12857 tw32(NVRAM_WRITE1, 0x406);
12859 grc_mode = tr32(GRC_MODE);
12860 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
12862 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
12863 ret = tg3_nvram_write_block_buffered(tp, offset, len,
12866 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
12870 grc_mode = tr32(GRC_MODE);
12871 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
12873 tg3_disable_nvram_access(tp);
12874 tg3_nvram_unlock(tp);
12877 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
12878 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
12885 struct subsys_tbl_ent {
12886 u16 subsys_vendor, subsys_devid;
12890 static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
12891 /* Broadcom boards. */
12892 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12893 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
12894 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12895 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
12896 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12897 TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
12898 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12899 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
12900 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12901 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
12902 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12903 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
12904 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12905 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
12906 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12907 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
12908 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12909 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
12910 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12911 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
12912 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12913 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
12916 { TG3PCI_SUBVENDOR_ID_3COM,
12917 TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
12918 { TG3PCI_SUBVENDOR_ID_3COM,
12919 TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
12920 { TG3PCI_SUBVENDOR_ID_3COM,
12921 TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
12922 { TG3PCI_SUBVENDOR_ID_3COM,
12923 TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
12924 { TG3PCI_SUBVENDOR_ID_3COM,
12925 TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
12928 { TG3PCI_SUBVENDOR_ID_DELL,
12929 TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
12930 { TG3PCI_SUBVENDOR_ID_DELL,
12931 TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
12932 { TG3PCI_SUBVENDOR_ID_DELL,
12933 TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
12934 { TG3PCI_SUBVENDOR_ID_DELL,
12935 TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
12937 /* Compaq boards. */
12938 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12939 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
12940 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12941 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
12942 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12943 TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
12944 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12945 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
12946 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12947 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
12950 { TG3PCI_SUBVENDOR_ID_IBM,
12951 TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
12954 static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
12958 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
12959 if ((subsys_id_to_phy_id[i].subsys_vendor ==
12960 tp->pdev->subsystem_vendor) &&
12961 (subsys_id_to_phy_id[i].subsys_devid ==
12962 tp->pdev->subsystem_device))
12963 return &subsys_id_to_phy_id[i];
12968 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
12972 tp->phy_id = TG3_PHY_ID_INVALID;
12973 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12975 /* Assume an onboard device and WOL capable by default. */
12976 tg3_flag_set(tp, EEPROM_WRITE_PROT);
12977 tg3_flag_set(tp, WOL_CAP);
12979 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12980 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
12981 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
12982 tg3_flag_set(tp, IS_NIC);
12984 val = tr32(VCPU_CFGSHDW);
12985 if (val & VCPU_CFGSHDW_ASPM_DBNC)
12986 tg3_flag_set(tp, ASPM_WORKAROUND);
12987 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
12988 (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
12989 tg3_flag_set(tp, WOL_ENABLE);
12990 device_set_wakeup_enable(&tp->pdev->dev, true);
12995 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
12996 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
12997 u32 nic_cfg, led_cfg;
12998 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
12999 int eeprom_phy_serdes = 0;
13001 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
13002 tp->nic_sram_data_cfg = nic_cfg;
13004 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
13005 ver >>= NIC_SRAM_DATA_VER_SHIFT;
13006 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13007 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13008 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
13009 (ver > 0) && (ver < 0x100))
13010 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
13012 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
13013 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
13015 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
13016 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
13017 eeprom_phy_serdes = 1;
13019 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
13020 if (nic_phy_id != 0) {
13021 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
13022 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
13024 eeprom_phy_id = (id1 >> 16) << 10;
13025 eeprom_phy_id |= (id2 & 0xfc00) << 16;
13026 eeprom_phy_id |= (id2 & 0x03ff) << 0;
13030 tp->phy_id = eeprom_phy_id;
13031 if (eeprom_phy_serdes) {
13032 if (!tg3_flag(tp, 5705_PLUS))
13033 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13035 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
13038 if (tg3_flag(tp, 5750_PLUS))
13039 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
13040 SHASTA_EXT_LED_MODE_MASK);
13042 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
13046 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
13047 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13050 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
13051 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
13054 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
13055 tp->led_ctrl = LED_CTRL_MODE_MAC;
13057 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
13058 * read on some older 5700/5701 bootcode.
13060 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
13062 GET_ASIC_REV(tp->pci_chip_rev_id) ==
13064 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13068 case SHASTA_EXT_LED_SHARED:
13069 tp->led_ctrl = LED_CTRL_MODE_SHARED;
13070 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
13071 tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
13072 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
13073 LED_CTRL_MODE_PHY_2);
13076 case SHASTA_EXT_LED_MAC:
13077 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
13080 case SHASTA_EXT_LED_COMBO:
13081 tp->led_ctrl = LED_CTRL_MODE_COMBO;
13082 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
13083 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
13084 LED_CTRL_MODE_PHY_2);
13089 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13090 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
13091 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
13092 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
13094 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
13095 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13097 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
13098 tg3_flag_set(tp, EEPROM_WRITE_PROT);
13099 if ((tp->pdev->subsystem_vendor ==
13100 PCI_VENDOR_ID_ARIMA) &&
13101 (tp->pdev->subsystem_device == 0x205a ||
13102 tp->pdev->subsystem_device == 0x2063))
13103 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13105 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13106 tg3_flag_set(tp, IS_NIC);
13109 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
13110 tg3_flag_set(tp, ENABLE_ASF);
13111 if (tg3_flag(tp, 5750_PLUS))
13112 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
13115 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
13116 tg3_flag(tp, 5750_PLUS))
13117 tg3_flag_set(tp, ENABLE_APE);
13119 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
13120 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
13121 tg3_flag_clear(tp, WOL_CAP);
13123 if (tg3_flag(tp, WOL_CAP) &&
13124 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
13125 tg3_flag_set(tp, WOL_ENABLE);
13126 device_set_wakeup_enable(&tp->pdev->dev, true);
13129 if (cfg2 & (1 << 17))
13130 tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
13132 /* serdes signal pre-emphasis in register 0x590 set by */
13133 /* bootcode if bit 18 is set */
13134 if (cfg2 & (1 << 18))
13135 tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
13137 if ((tg3_flag(tp, 57765_PLUS) ||
13138 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
13139 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
13140 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
13141 tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
13143 if (tg3_flag(tp, PCI_EXPRESS) &&
13144 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
13145 !tg3_flag(tp, 57765_PLUS)) {
13148 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
13149 if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
13150 tg3_flag_set(tp, ASPM_WORKAROUND);
13153 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
13154 tg3_flag_set(tp, RGMII_INBAND_DISABLE);
13155 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
13156 tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
13157 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
13158 tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
13161 if (tg3_flag(tp, WOL_CAP))
13162 device_set_wakeup_enable(&tp->pdev->dev,
13163 tg3_flag(tp, WOL_ENABLE));
13165 device_set_wakeup_capable(&tp->pdev->dev, false);
13168 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
13173 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
13174 tw32(OTP_CTRL, cmd);
13176 /* Wait for up to 1 ms for command to execute. */
13177 for (i = 0; i < 100; i++) {
13178 val = tr32(OTP_STATUS);
13179 if (val & OTP_STATUS_CMD_DONE)
13184 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
13187 /* Read the gphy configuration from the OTP region of the chip. The gphy
13188 * configuration is a 32-bit value that straddles the alignment boundary.
13189 * We do two 32-bit reads and then shift and merge the results.
13191 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
13193 u32 bhalf_otp, thalf_otp;
13195 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
13197 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
13200 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
13202 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13205 thalf_otp = tr32(OTP_READ_DATA);
13207 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
13209 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13212 bhalf_otp = tr32(OTP_READ_DATA);
13214 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
13217 static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
13219 u32 adv = ADVERTISED_Autoneg |
13222 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
13223 adv |= ADVERTISED_1000baseT_Half |
13224 ADVERTISED_1000baseT_Full;
13226 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
13227 adv |= ADVERTISED_100baseT_Half |
13228 ADVERTISED_100baseT_Full |
13229 ADVERTISED_10baseT_Half |
13230 ADVERTISED_10baseT_Full |
13233 adv |= ADVERTISED_FIBRE;
13235 tp->link_config.advertising = adv;
13236 tp->link_config.speed = SPEED_INVALID;
13237 tp->link_config.duplex = DUPLEX_INVALID;
13238 tp->link_config.autoneg = AUTONEG_ENABLE;
13239 tp->link_config.active_speed = SPEED_INVALID;
13240 tp->link_config.active_duplex = DUPLEX_INVALID;
13241 tp->link_config.orig_speed = SPEED_INVALID;
13242 tp->link_config.orig_duplex = DUPLEX_INVALID;
13243 tp->link_config.orig_autoneg = AUTONEG_INVALID;
13246 static int __devinit tg3_phy_probe(struct tg3 *tp)
13248 u32 hw_phy_id_1, hw_phy_id_2;
13249 u32 hw_phy_id, hw_phy_id_masked;
13252 /* flow control autonegotiation is default behavior */
13253 tg3_flag_set(tp, PAUSE_AUTONEG);
13254 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
13256 if (tg3_flag(tp, USE_PHYLIB))
13257 return tg3_phy_init(tp);
13259 /* Reading the PHY ID register can conflict with ASF
13260 * firmware access to the PHY hardware.
13263 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
13264 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
13266 /* Now read the physical PHY_ID from the chip and verify
13267 * that it is sane. If it doesn't look good, we fall back
13268 * to either the hard-coded table based PHY_ID and failing
13269 * that the value found in the eeprom area.
13271 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
13272 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
13274 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
13275 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
13276 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
13278 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
13281 if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
13282 tp->phy_id = hw_phy_id;
13283 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
13284 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13286 tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
13288 if (tp->phy_id != TG3_PHY_ID_INVALID) {
13289 /* Do nothing, phy ID already set up in
13290 * tg3_get_eeprom_hw_cfg().
13293 struct subsys_tbl_ent *p;
13295 /* No eeprom signature? Try the hardcoded
13296 * subsys device table.
13298 p = tg3_lookup_by_subsys(tp);
13302 tp->phy_id = p->phy_id;
13304 tp->phy_id == TG3_PHY_ID_BCM8002)
13305 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13309 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13310 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13311 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
13312 (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
13313 tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
13314 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
13315 tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
13316 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
13318 tg3_phy_init_link_config(tp);
13320 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13321 !tg3_flag(tp, ENABLE_APE) &&
13322 !tg3_flag(tp, ENABLE_ASF)) {
13325 tg3_readphy(tp, MII_BMSR, &bmsr);
13326 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
13327 (bmsr & BMSR_LSTATUS))
13328 goto skip_phy_reset;
13330 err = tg3_phy_reset(tp);
13334 tg3_phy_set_wirespeed(tp);
13336 mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
13337 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
13338 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
13339 if (!tg3_copper_is_advertising_all(tp, mask)) {
13340 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
13341 tp->link_config.flowctrl);
13343 tg3_writephy(tp, MII_BMCR,
13344 BMCR_ANENABLE | BMCR_ANRESTART);
13349 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
13350 err = tg3_init_5401phy_dsp(tp);
13354 err = tg3_init_5401phy_dsp(tp);
13360 static void __devinit tg3_read_vpd(struct tg3 *tp)
13363 unsigned int block_end, rosize, len;
13367 vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
13371 i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
13373 goto out_not_found;
13375 rosize = pci_vpd_lrdt_size(&vpd_data[i]);
13376 block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
13377 i += PCI_VPD_LRDT_TAG_SIZE;
13379 if (block_end > vpdlen)
13380 goto out_not_found;
13382 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13383 PCI_VPD_RO_KEYWORD_MFR_ID);
13385 len = pci_vpd_info_field_size(&vpd_data[j]);
13387 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13388 if (j + len > block_end || len != 4 ||
13389 memcmp(&vpd_data[j], "1028", 4))
13392 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13393 PCI_VPD_RO_KEYWORD_VENDOR0);
13397 len = pci_vpd_info_field_size(&vpd_data[j]);
13399 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13400 if (j + len > block_end)
13403 memcpy(tp->fw_ver, &vpd_data[j], len);
13404 strncat(tp->fw_ver, " bc ", vpdlen - len - 1);
13408 i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13409 PCI_VPD_RO_KEYWORD_PARTNO);
13411 goto out_not_found;
13413 len = pci_vpd_info_field_size(&vpd_data[i]);
13415 i += PCI_VPD_INFO_FLD_HDR_SIZE;
13416 if (len > TG3_BPN_SIZE ||
13417 (len + i) > vpdlen)
13418 goto out_not_found;
13420 memcpy(tp->board_part_number, &vpd_data[i], len);
13424 if (tp->board_part_number[0])
13428 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
13429 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
13430 strcpy(tp->board_part_number, "BCM5717");
13431 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
13432 strcpy(tp->board_part_number, "BCM5718");
13435 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
13436 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
13437 strcpy(tp->board_part_number, "BCM57780");
13438 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
13439 strcpy(tp->board_part_number, "BCM57760");
13440 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
13441 strcpy(tp->board_part_number, "BCM57790");
13442 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
13443 strcpy(tp->board_part_number, "BCM57788");
13446 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
13447 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
13448 strcpy(tp->board_part_number, "BCM57761");
13449 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
13450 strcpy(tp->board_part_number, "BCM57765");
13451 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
13452 strcpy(tp->board_part_number, "BCM57781");
13453 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
13454 strcpy(tp->board_part_number, "BCM57785");
13455 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
13456 strcpy(tp->board_part_number, "BCM57791");
13457 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13458 strcpy(tp->board_part_number, "BCM57795");
13461 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13462 strcpy(tp->board_part_number, "BCM95906");
13465 strcpy(tp->board_part_number, "none");
13469 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
13473 if (tg3_nvram_read(tp, offset, &val) ||
13474 (val & 0xfc000000) != 0x0c000000 ||
13475 tg3_nvram_read(tp, offset + 4, &val) ||
13482 static void __devinit tg3_read_bc_ver(struct tg3 *tp)
13484 u32 val, offset, start, ver_offset;
13486 bool newver = false;
13488 if (tg3_nvram_read(tp, 0xc, &offset) ||
13489 tg3_nvram_read(tp, 0x4, &start))
13492 offset = tg3_nvram_logical_addr(tp, offset);
13494 if (tg3_nvram_read(tp, offset, &val))
13497 if ((val & 0xfc000000) == 0x0c000000) {
13498 if (tg3_nvram_read(tp, offset + 4, &val))
13505 dst_off = strlen(tp->fw_ver);
13508 if (TG3_VER_SIZE - dst_off < 16 ||
13509 tg3_nvram_read(tp, offset + 8, &ver_offset))
13512 offset = offset + ver_offset - start;
13513 for (i = 0; i < 16; i += 4) {
13515 if (tg3_nvram_read_be32(tp, offset + i, &v))
13518 memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
13523 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
13526 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
13527 TG3_NVM_BCVER_MAJSFT;
13528 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
13529 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
13530 "v%d.%02d", major, minor);
13534 static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
13536 u32 val, major, minor;
13538 /* Use native endian representation */
13539 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
13542 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
13543 TG3_NVM_HWSB_CFG1_MAJSFT;
13544 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
13545 TG3_NVM_HWSB_CFG1_MINSFT;
13547 snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
13550 static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
13552 u32 offset, major, minor, build;
13554 strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
13556 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
13559 switch (val & TG3_EEPROM_SB_REVISION_MASK) {
13560 case TG3_EEPROM_SB_REVISION_0:
13561 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
13563 case TG3_EEPROM_SB_REVISION_2:
13564 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
13566 case TG3_EEPROM_SB_REVISION_3:
13567 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
13569 case TG3_EEPROM_SB_REVISION_4:
13570 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
13572 case TG3_EEPROM_SB_REVISION_5:
13573 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
13575 case TG3_EEPROM_SB_REVISION_6:
13576 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
13582 if (tg3_nvram_read(tp, offset, &val))
13585 build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
13586 TG3_EEPROM_SB_EDH_BLD_SHFT;
13587 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
13588 TG3_EEPROM_SB_EDH_MAJ_SHFT;
13589 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
13591 if (minor > 99 || build > 26)
13594 offset = strlen(tp->fw_ver);
13595 snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
13596 " v%d.%02d", major, minor);
13599 offset = strlen(tp->fw_ver);
13600 if (offset < TG3_VER_SIZE - 1)
13601 tp->fw_ver[offset] = 'a' + build - 1;
13605 static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
13607 u32 val, offset, start;
13610 for (offset = TG3_NVM_DIR_START;
13611 offset < TG3_NVM_DIR_END;
13612 offset += TG3_NVM_DIRENT_SIZE) {
13613 if (tg3_nvram_read(tp, offset, &val))
13616 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
13620 if (offset == TG3_NVM_DIR_END)
13623 if (!tg3_flag(tp, 5705_PLUS))
13624 start = 0x08000000;
13625 else if (tg3_nvram_read(tp, offset - 4, &start))
13628 if (tg3_nvram_read(tp, offset + 4, &offset) ||
13629 !tg3_fw_img_is_valid(tp, offset) ||
13630 tg3_nvram_read(tp, offset + 8, &val))
13633 offset += val - start;
13635 vlen = strlen(tp->fw_ver);
13637 tp->fw_ver[vlen++] = ',';
13638 tp->fw_ver[vlen++] = ' ';
13640 for (i = 0; i < 4; i++) {
13642 if (tg3_nvram_read_be32(tp, offset, &v))
13645 offset += sizeof(v);
13647 if (vlen > TG3_VER_SIZE - sizeof(v)) {
13648 memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
13652 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
13657 static void __devinit tg3_read_dash_ver(struct tg3 *tp)
13663 if (!tg3_flag(tp, ENABLE_APE) || !tg3_flag(tp, ENABLE_ASF))
13666 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
13667 if (apedata != APE_SEG_SIG_MAGIC)
13670 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
13671 if (!(apedata & APE_FW_STATUS_READY))
13674 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
13676 if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) {
13677 tg3_flag_set(tp, APE_HAS_NCSI);
13683 vlen = strlen(tp->fw_ver);
13685 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
13687 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
13688 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
13689 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
13690 (apedata & APE_FW_VERSION_BLDMSK));
13693 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
13696 bool vpd_vers = false;
13698 if (tp->fw_ver[0] != 0)
13701 if (tg3_flag(tp, NO_NVRAM)) {
13702 strcat(tp->fw_ver, "sb");
13706 if (tg3_nvram_read(tp, 0, &val))
13709 if (val == TG3_EEPROM_MAGIC)
13710 tg3_read_bc_ver(tp);
13711 else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
13712 tg3_read_sb_ver(tp, val);
13713 else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
13714 tg3_read_hwsb_ver(tp);
13721 if (tg3_flag(tp, ENABLE_APE)) {
13722 if (tg3_flag(tp, ENABLE_ASF))
13723 tg3_read_dash_ver(tp);
13724 } else if (tg3_flag(tp, ENABLE_ASF)) {
13725 tg3_read_mgmtfw_ver(tp);
13729 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
13732 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
13734 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
13736 if (tg3_flag(tp, LRG_PROD_RING_CAP))
13737 return TG3_RX_RET_MAX_SIZE_5717;
13738 else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
13739 return TG3_RX_RET_MAX_SIZE_5700;
13741 return TG3_RX_RET_MAX_SIZE_5705;
13744 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
13745 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
13746 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
13747 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
13751 static int __devinit tg3_get_invariants(struct tg3 *tp)
13754 u32 pci_state_reg, grc_misc_cfg;
13759 /* Force memory write invalidate off. If we leave it on,
13760 * then on 5700_BX chips we have to enable a workaround.
13761 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
13762 * to match the cacheline size. The Broadcom driver have this
13763 * workaround but turns MWI off all the times so never uses
13764 * it. This seems to suggest that the workaround is insufficient.
13766 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13767 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
13768 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13770 /* Important! -- Make sure register accesses are byteswapped
13771 * correctly. Also, for those chips that require it, make
13772 * sure that indirect register accesses are enabled before
13773 * the first operation.
13775 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13777 tp->misc_host_ctrl |= (misc_ctrl_reg &
13778 MISC_HOST_CTRL_CHIPREV);
13779 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13780 tp->misc_host_ctrl);
13782 tp->pci_chip_rev_id = (misc_ctrl_reg >>
13783 MISC_HOST_CTRL_CHIPREV_SHIFT);
13784 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
13785 u32 prod_id_asic_rev;
13787 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
13788 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
13789 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
13790 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
13791 pci_read_config_dword(tp->pdev,
13792 TG3PCI_GEN2_PRODID_ASICREV,
13793 &prod_id_asic_rev);
13794 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
13795 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
13796 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
13797 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
13798 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
13799 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13800 pci_read_config_dword(tp->pdev,
13801 TG3PCI_GEN15_PRODID_ASICREV,
13802 &prod_id_asic_rev);
13804 pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
13805 &prod_id_asic_rev);
13807 tp->pci_chip_rev_id = prod_id_asic_rev;
13810 /* Wrong chip ID in 5752 A0. This code can be removed later
13811 * as A0 is not in production.
13813 if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
13814 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
13816 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
13817 * we need to disable memory and use config. cycles
13818 * only to access all registers. The 5702/03 chips
13819 * can mistakenly decode the special cycles from the
13820 * ICH chipsets as memory write cycles, causing corruption
13821 * of register and memory space. Only certain ICH bridges
13822 * will drive special cycles with non-zero data during the
13823 * address phase which can fall within the 5703's address
13824 * range. This is not an ICH bug as the PCI spec allows
13825 * non-zero address during special cycles. However, only
13826 * these ICH bridges are known to drive non-zero addresses
13827 * during special cycles.
13829 * Since special cycles do not cross PCI bridges, we only
13830 * enable this workaround if the 5703 is on the secondary
13831 * bus of these ICH bridges.
13833 if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
13834 (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
13835 static struct tg3_dev_id {
13839 } ich_chipsets[] = {
13840 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
13842 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
13844 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
13846 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
13850 struct tg3_dev_id *pci_id = &ich_chipsets[0];
13851 struct pci_dev *bridge = NULL;
13853 while (pci_id->vendor != 0) {
13854 bridge = pci_get_device(pci_id->vendor, pci_id->device,
13860 if (pci_id->rev != PCI_ANY_ID) {
13861 if (bridge->revision > pci_id->rev)
13864 if (bridge->subordinate &&
13865 (bridge->subordinate->number ==
13866 tp->pdev->bus->number)) {
13867 tg3_flag_set(tp, ICH_WORKAROUND);
13868 pci_dev_put(bridge);
13874 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
13875 static struct tg3_dev_id {
13878 } bridge_chipsets[] = {
13879 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
13880 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
13883 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
13884 struct pci_dev *bridge = NULL;
13886 while (pci_id->vendor != 0) {
13887 bridge = pci_get_device(pci_id->vendor,
13894 if (bridge->subordinate &&
13895 (bridge->subordinate->number <=
13896 tp->pdev->bus->number) &&
13897 (bridge->subordinate->subordinate >=
13898 tp->pdev->bus->number)) {
13899 tg3_flag_set(tp, 5701_DMA_BUG);
13900 pci_dev_put(bridge);
13906 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
13907 * DMA addresses > 40-bit. This bridge may have other additional
13908 * 57xx devices behind it in some 4-port NIC designs for example.
13909 * Any tg3 device found behind the bridge will also need the 40-bit
13912 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
13913 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
13914 tg3_flag_set(tp, 5780_CLASS);
13915 tg3_flag_set(tp, 40BIT_DMA_BUG);
13916 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
13918 struct pci_dev *bridge = NULL;
13921 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
13922 PCI_DEVICE_ID_SERVERWORKS_EPB,
13924 if (bridge && bridge->subordinate &&
13925 (bridge->subordinate->number <=
13926 tp->pdev->bus->number) &&
13927 (bridge->subordinate->subordinate >=
13928 tp->pdev->bus->number)) {
13929 tg3_flag_set(tp, 40BIT_DMA_BUG);
13930 pci_dev_put(bridge);
13936 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
13937 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
13938 tp->pdev_peer = tg3_find_peer(tp);
13940 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13941 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13942 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13943 tg3_flag_set(tp, 5717_PLUS);
13945 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
13946 tg3_flag(tp, 5717_PLUS))
13947 tg3_flag_set(tp, 57765_PLUS);
13949 /* Intentionally exclude ASIC_REV_5906 */
13950 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13951 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13952 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13953 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13954 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13955 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13956 tg3_flag(tp, 57765_PLUS))
13957 tg3_flag_set(tp, 5755_PLUS);
13959 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
13960 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
13961 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
13962 tg3_flag(tp, 5755_PLUS) ||
13963 tg3_flag(tp, 5780_CLASS))
13964 tg3_flag_set(tp, 5750_PLUS);
13966 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
13967 tg3_flag(tp, 5750_PLUS))
13968 tg3_flag_set(tp, 5705_PLUS);
13970 /* Determine TSO capabilities */
13971 if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0)
13972 ; /* Do nothing. HW bug. */
13973 else if (tg3_flag(tp, 57765_PLUS))
13974 tg3_flag_set(tp, HW_TSO_3);
13975 else if (tg3_flag(tp, 5755_PLUS) ||
13976 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13977 tg3_flag_set(tp, HW_TSO_2);
13978 else if (tg3_flag(tp, 5750_PLUS)) {
13979 tg3_flag_set(tp, HW_TSO_1);
13980 tg3_flag_set(tp, TSO_BUG);
13981 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
13982 tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
13983 tg3_flag_clear(tp, TSO_BUG);
13984 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13985 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13986 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
13987 tg3_flag_set(tp, TSO_BUG);
13988 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
13989 tp->fw_needed = FIRMWARE_TG3TSO5;
13991 tp->fw_needed = FIRMWARE_TG3TSO;
13994 /* Selectively allow TSO based on operating conditions */
13995 if (tg3_flag(tp, HW_TSO_1) ||
13996 tg3_flag(tp, HW_TSO_2) ||
13997 tg3_flag(tp, HW_TSO_3) ||
13998 (tp->fw_needed && !tg3_flag(tp, ENABLE_ASF)))
13999 tg3_flag_set(tp, TSO_CAPABLE);
14001 tg3_flag_clear(tp, TSO_CAPABLE);
14002 tg3_flag_clear(tp, TSO_BUG);
14003 tp->fw_needed = NULL;
14006 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
14007 tp->fw_needed = FIRMWARE_TG3;
14011 if (tg3_flag(tp, 5750_PLUS)) {
14012 tg3_flag_set(tp, SUPPORT_MSI);
14013 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
14014 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
14015 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
14016 tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
14017 tp->pdev_peer == tp->pdev))
14018 tg3_flag_clear(tp, SUPPORT_MSI);
14020 if (tg3_flag(tp, 5755_PLUS) ||
14021 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14022 tg3_flag_set(tp, 1SHOT_MSI);
14025 if (tg3_flag(tp, 57765_PLUS)) {
14026 tg3_flag_set(tp, SUPPORT_MSIX);
14027 tp->irq_max = TG3_IRQ_MAX_VECS;
14031 if (tg3_flag(tp, 5755_PLUS))
14032 tg3_flag_set(tp, SHORT_DMA_BUG);
14034 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
14035 tg3_flag_set(tp, 4K_FIFO_LIMIT);
14037 if (tg3_flag(tp, 5717_PLUS))
14038 tg3_flag_set(tp, LRG_PROD_RING_CAP);
14040 if (tg3_flag(tp, 57765_PLUS) &&
14041 tp->pci_chip_rev_id != CHIPREV_ID_5719_A0)
14042 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
14044 if (!tg3_flag(tp, 5705_PLUS) ||
14045 tg3_flag(tp, 5780_CLASS) ||
14046 tg3_flag(tp, USE_JUMBO_BDFLAG))
14047 tg3_flag_set(tp, JUMBO_CAPABLE);
14049 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14052 if (pci_is_pcie(tp->pdev)) {
14055 tg3_flag_set(tp, PCI_EXPRESS);
14057 tp->pcie_readrq = 4096;
14058 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14059 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14060 tp->pcie_readrq = 2048;
14062 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
14064 pci_read_config_word(tp->pdev,
14065 pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
14067 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
14068 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
14070 tg3_flag_clear(tp, HW_TSO_2);
14071 tg3_flag_clear(tp, TSO_CAPABLE);
14073 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14074 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14075 tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
14076 tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
14077 tg3_flag_set(tp, CLKREQ_BUG);
14078 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
14079 tg3_flag_set(tp, L1PLLPD_EN);
14081 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
14082 /* BCM5785 devices are effectively PCIe devices, and should
14083 * follow PCIe codepaths, but do not have a PCIe capabilities
14086 tg3_flag_set(tp, PCI_EXPRESS);
14087 } else if (!tg3_flag(tp, 5705_PLUS) ||
14088 tg3_flag(tp, 5780_CLASS)) {
14089 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
14090 if (!tp->pcix_cap) {
14091 dev_err(&tp->pdev->dev,
14092 "Cannot find PCI-X capability, aborting\n");
14096 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
14097 tg3_flag_set(tp, PCIX_MODE);
14100 /* If we have an AMD 762 or VIA K8T800 chipset, write
14101 * reordering to the mailbox registers done by the host
14102 * controller can cause major troubles. We read back from
14103 * every mailbox register write to force the writes to be
14104 * posted to the chip in order.
14106 if (pci_dev_present(tg3_write_reorder_chipsets) &&
14107 !tg3_flag(tp, PCI_EXPRESS))
14108 tg3_flag_set(tp, MBOX_WRITE_REORDER);
14110 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
14111 &tp->pci_cacheline_sz);
14112 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14113 &tp->pci_lat_timer);
14114 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14115 tp->pci_lat_timer < 64) {
14116 tp->pci_lat_timer = 64;
14117 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14118 tp->pci_lat_timer);
14121 /* Important! -- It is critical that the PCI-X hw workaround
14122 * situation is decided before the first MMIO register access.
14124 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
14125 /* 5700 BX chips need to have their TX producer index
14126 * mailboxes written twice to workaround a bug.
14128 tg3_flag_set(tp, TXD_MBOX_HWBUG);
14130 /* If we are in PCI-X mode, enable register write workaround.
14132 * The workaround is to use indirect register accesses
14133 * for all chip writes not to mailbox registers.
14135 if (tg3_flag(tp, PCIX_MODE)) {
14138 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14140 /* The chip can have it's power management PCI config
14141 * space registers clobbered due to this bug.
14142 * So explicitly force the chip into D0 here.
14144 pci_read_config_dword(tp->pdev,
14145 tp->pm_cap + PCI_PM_CTRL,
14147 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
14148 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
14149 pci_write_config_dword(tp->pdev,
14150 tp->pm_cap + PCI_PM_CTRL,
14153 /* Also, force SERR#/PERR# in PCI command. */
14154 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14155 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
14156 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14160 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
14161 tg3_flag_set(tp, PCI_HIGH_SPEED);
14162 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
14163 tg3_flag_set(tp, PCI_32BIT);
14165 /* Chip-specific fixup from Broadcom driver */
14166 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
14167 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
14168 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
14169 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
14172 /* Default fast path register access methods */
14173 tp->read32 = tg3_read32;
14174 tp->write32 = tg3_write32;
14175 tp->read32_mbox = tg3_read32;
14176 tp->write32_mbox = tg3_write32;
14177 tp->write32_tx_mbox = tg3_write32;
14178 tp->write32_rx_mbox = tg3_write32;
14180 /* Various workaround register access methods */
14181 if (tg3_flag(tp, PCIX_TARGET_HWBUG))
14182 tp->write32 = tg3_write_indirect_reg32;
14183 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
14184 (tg3_flag(tp, PCI_EXPRESS) &&
14185 tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
14187 * Back to back register writes can cause problems on these
14188 * chips, the workaround is to read back all reg writes
14189 * except those to mailbox regs.
14191 * See tg3_write_indirect_reg32().
14193 tp->write32 = tg3_write_flush_reg32;
14196 if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
14197 tp->write32_tx_mbox = tg3_write32_tx_mbox;
14198 if (tg3_flag(tp, MBOX_WRITE_REORDER))
14199 tp->write32_rx_mbox = tg3_write_flush_reg32;
14202 if (tg3_flag(tp, ICH_WORKAROUND)) {
14203 tp->read32 = tg3_read_indirect_reg32;
14204 tp->write32 = tg3_write_indirect_reg32;
14205 tp->read32_mbox = tg3_read_indirect_mbox;
14206 tp->write32_mbox = tg3_write_indirect_mbox;
14207 tp->write32_tx_mbox = tg3_write_indirect_mbox;
14208 tp->write32_rx_mbox = tg3_write_indirect_mbox;
14213 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14214 pci_cmd &= ~PCI_COMMAND_MEMORY;
14215 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14217 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14218 tp->read32_mbox = tg3_read32_mbox_5906;
14219 tp->write32_mbox = tg3_write32_mbox_5906;
14220 tp->write32_tx_mbox = tg3_write32_mbox_5906;
14221 tp->write32_rx_mbox = tg3_write32_mbox_5906;
14224 if (tp->write32 == tg3_write_indirect_reg32 ||
14225 (tg3_flag(tp, PCIX_MODE) &&
14226 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14227 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
14228 tg3_flag_set(tp, SRAM_USE_CONFIG);
14230 /* The memory arbiter has to be enabled in order for SRAM accesses
14231 * to succeed. Normally on powerup the tg3 chip firmware will make
14232 * sure it is enabled, but other entities such as system netboot
14233 * code might disable it.
14235 val = tr32(MEMARB_MODE);
14236 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
14238 if (tg3_flag(tp, PCIX_MODE)) {
14239 pci_read_config_dword(tp->pdev,
14240 tp->pcix_cap + PCI_X_STATUS, &val);
14241 tp->pci_fn = val & 0x7;
14243 tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
14246 /* Get eeprom hw config before calling tg3_set_power_state().
14247 * In particular, the TG3_FLAG_IS_NIC flag must be
14248 * determined before calling tg3_set_power_state() so that
14249 * we know whether or not to switch out of Vaux power.
14250 * When the flag is set, it means that GPIO1 is used for eeprom
14251 * write protect and also implies that it is a LOM where GPIOs
14252 * are not used to switch power.
14254 tg3_get_eeprom_hw_cfg(tp);
14256 if (tg3_flag(tp, ENABLE_APE)) {
14257 /* Allow reads and writes to the
14258 * APE register and memory space.
14260 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
14261 PCISTATE_ALLOW_APE_SHMEM_WR |
14262 PCISTATE_ALLOW_APE_PSPACE_WR;
14263 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
14266 tg3_ape_lock_init(tp);
14269 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14270 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14271 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14272 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14273 tg3_flag(tp, 57765_PLUS))
14274 tg3_flag_set(tp, CPMU_PRESENT);
14276 /* Set up tp->grc_local_ctrl before calling
14277 * tg3_pwrsrc_switch_to_vmain(). GPIO1 driven high
14278 * will bring 5700's external PHY out of reset.
14279 * It is also used as eeprom write protect on LOMs.
14281 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
14282 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14283 tg3_flag(tp, EEPROM_WRITE_PROT))
14284 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
14285 GRC_LCLCTRL_GPIO_OUTPUT1);
14286 /* Unused GPIO3 must be driven as output on 5752 because there
14287 * are no pull-up resistors on unused GPIO pins.
14289 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
14290 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
14292 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14293 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14294 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
14295 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14297 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
14298 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
14299 /* Turn off the debug UART. */
14300 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14301 if (tg3_flag(tp, IS_NIC))
14302 /* Keep VMain power. */
14303 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
14304 GRC_LCLCTRL_GPIO_OUTPUT0;
14307 /* Switch out of Vaux if it is a NIC */
14308 tg3_pwrsrc_switch_to_vmain(tp);
14310 /* Derive initial jumbo mode from MTU assigned in
14311 * ether_setup() via the alloc_etherdev() call
14313 if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
14314 tg3_flag_set(tp, JUMBO_RING_ENABLE);
14316 /* Determine WakeOnLan speed to use. */
14317 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14318 tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
14319 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
14320 tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
14321 tg3_flag_clear(tp, WOL_SPEED_100MB);
14323 tg3_flag_set(tp, WOL_SPEED_100MB);
14326 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14327 tp->phy_flags |= TG3_PHYFLG_IS_FET;
14329 /* A few boards don't want Ethernet@WireSpeed phy feature */
14330 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14331 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14332 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
14333 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
14334 (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
14335 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
14336 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
14338 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
14339 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
14340 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
14341 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
14342 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
14344 if (tg3_flag(tp, 5705_PLUS) &&
14345 !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
14346 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
14347 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
14348 !tg3_flag(tp, 57765_PLUS)) {
14349 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14350 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
14351 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14352 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
14353 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
14354 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
14355 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
14356 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
14357 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
14359 tp->phy_flags |= TG3_PHYFLG_BER_BUG;
14362 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
14363 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
14364 tp->phy_otp = tg3_read_otp_phycfg(tp);
14365 if (tp->phy_otp == 0)
14366 tp->phy_otp = TG3_OTP_DEFAULT;
14369 if (tg3_flag(tp, CPMU_PRESENT))
14370 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
14372 tp->mi_mode = MAC_MI_MODE_BASE;
14374 tp->coalesce_mode = 0;
14375 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
14376 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
14377 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
14379 /* Set these bits to enable statistics workaround. */
14380 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14381 tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
14382 tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
14383 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
14384 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
14387 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14388 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
14389 tg3_flag_set(tp, USE_PHYLIB);
14391 err = tg3_mdio_init(tp);
14395 /* Initialize data/descriptor byte/word swapping. */
14396 val = tr32(GRC_MODE);
14397 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14398 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
14399 GRC_MODE_WORD_SWAP_B2HRX_DATA |
14400 GRC_MODE_B2HRX_ENABLE |
14401 GRC_MODE_HTX2B_ENABLE |
14402 GRC_MODE_HOST_STACKUP);
14404 val &= GRC_MODE_HOST_STACKUP;
14406 tw32(GRC_MODE, val | tp->grc_mode);
14408 tg3_switch_clocks(tp);
14410 /* Clear this out for sanity. */
14411 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
14413 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14415 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
14416 !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
14417 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
14419 if (chiprevid == CHIPREV_ID_5701_A0 ||
14420 chiprevid == CHIPREV_ID_5701_B0 ||
14421 chiprevid == CHIPREV_ID_5701_B2 ||
14422 chiprevid == CHIPREV_ID_5701_B5) {
14423 void __iomem *sram_base;
14425 /* Write some dummy words into the SRAM status block
14426 * area, see if it reads back correctly. If the return
14427 * value is bad, force enable the PCIX workaround.
14429 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
14431 writel(0x00000000, sram_base);
14432 writel(0x00000000, sram_base + 4);
14433 writel(0xffffffff, sram_base + 4);
14434 if (readl(sram_base) != 0x00000000)
14435 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14440 tg3_nvram_init(tp);
14442 grc_misc_cfg = tr32(GRC_MISC_CFG);
14443 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
14445 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14446 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
14447 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
14448 tg3_flag_set(tp, IS_5788);
14450 if (!tg3_flag(tp, IS_5788) &&
14451 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
14452 tg3_flag_set(tp, TAGGED_STATUS);
14453 if (tg3_flag(tp, TAGGED_STATUS)) {
14454 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
14455 HOSTCC_MODE_CLRTICK_TXBD);
14457 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
14458 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14459 tp->misc_host_ctrl);
14462 /* Preserve the APE MAC_MODE bits */
14463 if (tg3_flag(tp, ENABLE_APE))
14464 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
14468 /* these are limited to 10/100 only */
14469 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14470 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
14471 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14472 tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14473 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
14474 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
14475 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
14476 (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14477 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
14478 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
14479 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
14480 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
14481 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
14482 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
14483 (tp->phy_flags & TG3_PHYFLG_IS_FET))
14484 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
14486 err = tg3_phy_probe(tp);
14488 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
14489 /* ... but do not return immediately ... */
14494 tg3_read_fw_ver(tp);
14496 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
14497 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14499 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14500 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14502 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14505 /* 5700 {AX,BX} chips have a broken status block link
14506 * change bit implementation, so we must use the
14507 * status register in those cases.
14509 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14510 tg3_flag_set(tp, USE_LINKCHG_REG);
14512 tg3_flag_clear(tp, USE_LINKCHG_REG);
14514 /* The led_ctrl is set during tg3_phy_probe, here we might
14515 * have to force the link status polling mechanism based
14516 * upon subsystem IDs.
14518 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
14519 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14520 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
14521 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14522 tg3_flag_set(tp, USE_LINKCHG_REG);
14525 /* For all SERDES we poll the MAC status register. */
14526 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14527 tg3_flag_set(tp, POLL_SERDES);
14529 tg3_flag_clear(tp, POLL_SERDES);
14531 tp->rx_offset = NET_IP_ALIGN;
14532 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
14533 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14534 tg3_flag(tp, PCIX_MODE)) {
14536 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
14537 tp->rx_copy_thresh = ~(u16)0;
14541 tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
14542 tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
14543 tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
14545 tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
14547 /* Increment the rx prod index on the rx std ring by at most
14548 * 8 for these chips to workaround hw errata.
14550 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14551 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14552 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
14553 tp->rx_std_max_post = 8;
14555 if (tg3_flag(tp, ASPM_WORKAROUND))
14556 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
14557 PCIE_PWR_MGMT_L1_THRESH_MSK;
14562 #ifdef CONFIG_SPARC
14563 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
14565 struct net_device *dev = tp->dev;
14566 struct pci_dev *pdev = tp->pdev;
14567 struct device_node *dp = pci_device_to_OF_node(pdev);
14568 const unsigned char *addr;
14571 addr = of_get_property(dp, "local-mac-address", &len);
14572 if (addr && len == 6) {
14573 memcpy(dev->dev_addr, addr, 6);
14574 memcpy(dev->perm_addr, dev->dev_addr, 6);
14580 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
14582 struct net_device *dev = tp->dev;
14584 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
14585 memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
14590 static int __devinit tg3_get_device_address(struct tg3 *tp)
14592 struct net_device *dev = tp->dev;
14593 u32 hi, lo, mac_offset;
14596 #ifdef CONFIG_SPARC
14597 if (!tg3_get_macaddr_sparc(tp))
14602 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14603 tg3_flag(tp, 5780_CLASS)) {
14604 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
14606 if (tg3_nvram_lock(tp))
14607 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
14609 tg3_nvram_unlock(tp);
14610 } else if (tg3_flag(tp, 5717_PLUS)) {
14611 if (tp->pci_fn & 1)
14613 if (tp->pci_fn > 1)
14614 mac_offset += 0x18c;
14615 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14618 /* First try to get it from MAC address mailbox. */
14619 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
14620 if ((hi >> 16) == 0x484b) {
14621 dev->dev_addr[0] = (hi >> 8) & 0xff;
14622 dev->dev_addr[1] = (hi >> 0) & 0xff;
14624 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
14625 dev->dev_addr[2] = (lo >> 24) & 0xff;
14626 dev->dev_addr[3] = (lo >> 16) & 0xff;
14627 dev->dev_addr[4] = (lo >> 8) & 0xff;
14628 dev->dev_addr[5] = (lo >> 0) & 0xff;
14630 /* Some old bootcode may report a 0 MAC address in SRAM */
14631 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
14634 /* Next, try NVRAM. */
14635 if (!tg3_flag(tp, NO_NVRAM) &&
14636 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
14637 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
14638 memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
14639 memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
14641 /* Finally just fetch it out of the MAC control regs. */
14643 hi = tr32(MAC_ADDR_0_HIGH);
14644 lo = tr32(MAC_ADDR_0_LOW);
14646 dev->dev_addr[5] = lo & 0xff;
14647 dev->dev_addr[4] = (lo >> 8) & 0xff;
14648 dev->dev_addr[3] = (lo >> 16) & 0xff;
14649 dev->dev_addr[2] = (lo >> 24) & 0xff;
14650 dev->dev_addr[1] = hi & 0xff;
14651 dev->dev_addr[0] = (hi >> 8) & 0xff;
14655 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
14656 #ifdef CONFIG_SPARC
14657 if (!tg3_get_default_macaddr_sparc(tp))
14662 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
14666 #define BOUNDARY_SINGLE_CACHELINE 1
14667 #define BOUNDARY_MULTI_CACHELINE 2
14669 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
14671 int cacheline_size;
14675 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
14677 cacheline_size = 1024;
14679 cacheline_size = (int) byte * 4;
14681 /* On 5703 and later chips, the boundary bits have no
14684 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14685 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14686 !tg3_flag(tp, PCI_EXPRESS))
14689 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
14690 goal = BOUNDARY_MULTI_CACHELINE;
14692 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
14693 goal = BOUNDARY_SINGLE_CACHELINE;
14699 if (tg3_flag(tp, 57765_PLUS)) {
14700 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
14707 /* PCI controllers on most RISC systems tend to disconnect
14708 * when a device tries to burst across a cache-line boundary.
14709 * Therefore, letting tg3 do so just wastes PCI bandwidth.
14711 * Unfortunately, for PCI-E there are only limited
14712 * write-side controls for this, and thus for reads
14713 * we will still get the disconnects. We'll also waste
14714 * these PCI cycles for both read and write for chips
14715 * other than 5700 and 5701 which do not implement the
14718 if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
14719 switch (cacheline_size) {
14724 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14725 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
14726 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
14728 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14729 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14734 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
14735 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
14739 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14740 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14743 } else if (tg3_flag(tp, PCI_EXPRESS)) {
14744 switch (cacheline_size) {
14748 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14749 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14750 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
14756 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14757 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
14761 switch (cacheline_size) {
14763 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14764 val |= (DMA_RWCTRL_READ_BNDRY_16 |
14765 DMA_RWCTRL_WRITE_BNDRY_16);
14770 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14771 val |= (DMA_RWCTRL_READ_BNDRY_32 |
14772 DMA_RWCTRL_WRITE_BNDRY_32);
14777 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14778 val |= (DMA_RWCTRL_READ_BNDRY_64 |
14779 DMA_RWCTRL_WRITE_BNDRY_64);
14784 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14785 val |= (DMA_RWCTRL_READ_BNDRY_128 |
14786 DMA_RWCTRL_WRITE_BNDRY_128);
14791 val |= (DMA_RWCTRL_READ_BNDRY_256 |
14792 DMA_RWCTRL_WRITE_BNDRY_256);
14795 val |= (DMA_RWCTRL_READ_BNDRY_512 |
14796 DMA_RWCTRL_WRITE_BNDRY_512);
14800 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
14801 DMA_RWCTRL_WRITE_BNDRY_1024);
14810 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
14812 struct tg3_internal_buffer_desc test_desc;
14813 u32 sram_dma_descs;
14816 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
14818 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
14819 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
14820 tw32(RDMAC_STATUS, 0);
14821 tw32(WDMAC_STATUS, 0);
14823 tw32(BUFMGR_MODE, 0);
14824 tw32(FTQ_RESET, 0);
14826 test_desc.addr_hi = ((u64) buf_dma) >> 32;
14827 test_desc.addr_lo = buf_dma & 0xffffffff;
14828 test_desc.nic_mbuf = 0x00002100;
14829 test_desc.len = size;
14832 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
14833 * the *second* time the tg3 driver was getting loaded after an
14836 * Broadcom tells me:
14837 * ...the DMA engine is connected to the GRC block and a DMA
14838 * reset may affect the GRC block in some unpredictable way...
14839 * The behavior of resets to individual blocks has not been tested.
14841 * Broadcom noted the GRC reset will also reset all sub-components.
14844 test_desc.cqid_sqid = (13 << 8) | 2;
14846 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
14849 test_desc.cqid_sqid = (16 << 8) | 7;
14851 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
14854 test_desc.flags = 0x00000005;
14856 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
14859 val = *(((u32 *)&test_desc) + i);
14860 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
14861 sram_dma_descs + (i * sizeof(u32)));
14862 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
14864 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
14867 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
14869 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
14872 for (i = 0; i < 40; i++) {
14876 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
14878 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
14879 if ((val & 0xffff) == sram_dma_descs) {
14890 #define TEST_BUFFER_SIZE 0x2000
14892 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
14893 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
14897 static int __devinit tg3_test_dma(struct tg3 *tp)
14899 dma_addr_t buf_dma;
14900 u32 *buf, saved_dma_rwctrl;
14903 buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
14904 &buf_dma, GFP_KERNEL);
14910 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
14911 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
14913 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
14915 if (tg3_flag(tp, 57765_PLUS))
14918 if (tg3_flag(tp, PCI_EXPRESS)) {
14919 /* DMA read watermark not used on PCIE */
14920 tp->dma_rwctrl |= 0x00180000;
14921 } else if (!tg3_flag(tp, PCIX_MODE)) {
14922 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
14923 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
14924 tp->dma_rwctrl |= 0x003f0000;
14926 tp->dma_rwctrl |= 0x003f000f;
14928 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
14929 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
14930 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
14931 u32 read_water = 0x7;
14933 /* If the 5704 is behind the EPB bridge, we can
14934 * do the less restrictive ONE_DMA workaround for
14935 * better performance.
14937 if (tg3_flag(tp, 40BIT_DMA_BUG) &&
14938 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
14939 tp->dma_rwctrl |= 0x8000;
14940 else if (ccval == 0x6 || ccval == 0x7)
14941 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
14943 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
14945 /* Set bit 23 to enable PCIX hw bug fix */
14947 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
14948 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
14950 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
14951 /* 5780 always in PCIX mode */
14952 tp->dma_rwctrl |= 0x00144000;
14953 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
14954 /* 5714 always in PCIX mode */
14955 tp->dma_rwctrl |= 0x00148000;
14957 tp->dma_rwctrl |= 0x001b000f;
14961 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
14962 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
14963 tp->dma_rwctrl &= 0xfffffff0;
14965 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14966 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
14967 /* Remove this if it causes problems for some boards. */
14968 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
14970 /* On 5700/5701 chips, we need to set this bit.
14971 * Otherwise the chip will issue cacheline transactions
14972 * to streamable DMA memory with not all the byte
14973 * enables turned on. This is an error on several
14974 * RISC PCI controllers, in particular sparc64.
14976 * On 5703/5704 chips, this bit has been reassigned
14977 * a different meaning. In particular, it is used
14978 * on those chips to enable a PCI-X workaround.
14980 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
14983 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14986 /* Unneeded, already done by tg3_get_invariants. */
14987 tg3_switch_clocks(tp);
14990 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14991 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
14994 /* It is best to perform DMA test with maximum write burst size
14995 * to expose the 5700/5701 write DMA bug.
14997 saved_dma_rwctrl = tp->dma_rwctrl;
14998 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14999 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15004 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
15007 /* Send the buffer to the chip. */
15008 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
15010 dev_err(&tp->pdev->dev,
15011 "%s: Buffer write failed. err = %d\n",
15017 /* validate data reached card RAM correctly. */
15018 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
15020 tg3_read_mem(tp, 0x2100 + (i*4), &val);
15021 if (le32_to_cpu(val) != p[i]) {
15022 dev_err(&tp->pdev->dev,
15023 "%s: Buffer corrupted on device! "
15024 "(%d != %d)\n", __func__, val, i);
15025 /* ret = -ENODEV here? */
15030 /* Now read it back. */
15031 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
15033 dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
15034 "err = %d\n", __func__, ret);
15039 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
15043 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
15044 DMA_RWCTRL_WRITE_BNDRY_16) {
15045 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15046 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
15047 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15050 dev_err(&tp->pdev->dev,
15051 "%s: Buffer corrupted on read back! "
15052 "(%d != %d)\n", __func__, p[i], i);
15058 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
15064 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
15065 DMA_RWCTRL_WRITE_BNDRY_16) {
15066 /* DMA test passed without adjusting DMA boundary,
15067 * now look for chipsets that are known to expose the
15068 * DMA bug without failing the test.
15070 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
15071 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15072 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
15074 /* Safe to use the calculated DMA boundary. */
15075 tp->dma_rwctrl = saved_dma_rwctrl;
15078 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15082 dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
15087 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
15089 if (tg3_flag(tp, 57765_PLUS)) {
15090 tp->bufmgr_config.mbuf_read_dma_low_water =
15091 DEFAULT_MB_RDMA_LOW_WATER_5705;
15092 tp->bufmgr_config.mbuf_mac_rx_low_water =
15093 DEFAULT_MB_MACRX_LOW_WATER_57765;
15094 tp->bufmgr_config.mbuf_high_water =
15095 DEFAULT_MB_HIGH_WATER_57765;
15097 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15098 DEFAULT_MB_RDMA_LOW_WATER_5705;
15099 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15100 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
15101 tp->bufmgr_config.mbuf_high_water_jumbo =
15102 DEFAULT_MB_HIGH_WATER_JUMBO_57765;
15103 } else if (tg3_flag(tp, 5705_PLUS)) {
15104 tp->bufmgr_config.mbuf_read_dma_low_water =
15105 DEFAULT_MB_RDMA_LOW_WATER_5705;
15106 tp->bufmgr_config.mbuf_mac_rx_low_water =
15107 DEFAULT_MB_MACRX_LOW_WATER_5705;
15108 tp->bufmgr_config.mbuf_high_water =
15109 DEFAULT_MB_HIGH_WATER_5705;
15110 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
15111 tp->bufmgr_config.mbuf_mac_rx_low_water =
15112 DEFAULT_MB_MACRX_LOW_WATER_5906;
15113 tp->bufmgr_config.mbuf_high_water =
15114 DEFAULT_MB_HIGH_WATER_5906;
15117 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15118 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
15119 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15120 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
15121 tp->bufmgr_config.mbuf_high_water_jumbo =
15122 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
15124 tp->bufmgr_config.mbuf_read_dma_low_water =
15125 DEFAULT_MB_RDMA_LOW_WATER;
15126 tp->bufmgr_config.mbuf_mac_rx_low_water =
15127 DEFAULT_MB_MACRX_LOW_WATER;
15128 tp->bufmgr_config.mbuf_high_water =
15129 DEFAULT_MB_HIGH_WATER;
15131 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15132 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
15133 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15134 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
15135 tp->bufmgr_config.mbuf_high_water_jumbo =
15136 DEFAULT_MB_HIGH_WATER_JUMBO;
15139 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
15140 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
15143 static char * __devinit tg3_phy_string(struct tg3 *tp)
15145 switch (tp->phy_id & TG3_PHY_ID_MASK) {
15146 case TG3_PHY_ID_BCM5400: return "5400";
15147 case TG3_PHY_ID_BCM5401: return "5401";
15148 case TG3_PHY_ID_BCM5411: return "5411";
15149 case TG3_PHY_ID_BCM5701: return "5701";
15150 case TG3_PHY_ID_BCM5703: return "5703";
15151 case TG3_PHY_ID_BCM5704: return "5704";
15152 case TG3_PHY_ID_BCM5705: return "5705";
15153 case TG3_PHY_ID_BCM5750: return "5750";
15154 case TG3_PHY_ID_BCM5752: return "5752";
15155 case TG3_PHY_ID_BCM5714: return "5714";
15156 case TG3_PHY_ID_BCM5780: return "5780";
15157 case TG3_PHY_ID_BCM5755: return "5755";
15158 case TG3_PHY_ID_BCM5787: return "5787";
15159 case TG3_PHY_ID_BCM5784: return "5784";
15160 case TG3_PHY_ID_BCM5756: return "5722/5756";
15161 case TG3_PHY_ID_BCM5906: return "5906";
15162 case TG3_PHY_ID_BCM5761: return "5761";
15163 case TG3_PHY_ID_BCM5718C: return "5718C";
15164 case TG3_PHY_ID_BCM5718S: return "5718S";
15165 case TG3_PHY_ID_BCM57765: return "57765";
15166 case TG3_PHY_ID_BCM5719C: return "5719C";
15167 case TG3_PHY_ID_BCM5720C: return "5720C";
15168 case TG3_PHY_ID_BCM8002: return "8002/serdes";
15169 case 0: return "serdes";
15170 default: return "unknown";
15174 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
15176 if (tg3_flag(tp, PCI_EXPRESS)) {
15177 strcpy(str, "PCI Express");
15179 } else if (tg3_flag(tp, PCIX_MODE)) {
15180 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
15182 strcpy(str, "PCIX:");
15184 if ((clock_ctrl == 7) ||
15185 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
15186 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
15187 strcat(str, "133MHz");
15188 else if (clock_ctrl == 0)
15189 strcat(str, "33MHz");
15190 else if (clock_ctrl == 2)
15191 strcat(str, "50MHz");
15192 else if (clock_ctrl == 4)
15193 strcat(str, "66MHz");
15194 else if (clock_ctrl == 6)
15195 strcat(str, "100MHz");
15197 strcpy(str, "PCI:");
15198 if (tg3_flag(tp, PCI_HIGH_SPEED))
15199 strcat(str, "66MHz");
15201 strcat(str, "33MHz");
15203 if (tg3_flag(tp, PCI_32BIT))
15204 strcat(str, ":32-bit");
15206 strcat(str, ":64-bit");
15210 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
15212 struct pci_dev *peer;
15213 unsigned int func, devnr = tp->pdev->devfn & ~7;
15215 for (func = 0; func < 8; func++) {
15216 peer = pci_get_slot(tp->pdev->bus, devnr | func);
15217 if (peer && peer != tp->pdev)
15221 /* 5704 can be configured in single-port mode, set peer to
15222 * tp->pdev in that case.
15230 * We don't need to keep the refcount elevated; there's no way
15231 * to remove one half of this device without removing the other
15238 static void __devinit tg3_init_coal(struct tg3 *tp)
15240 struct ethtool_coalesce *ec = &tp->coal;
15242 memset(ec, 0, sizeof(*ec));
15243 ec->cmd = ETHTOOL_GCOALESCE;
15244 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
15245 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
15246 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
15247 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
15248 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
15249 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
15250 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
15251 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
15252 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
15254 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
15255 HOSTCC_MODE_CLRTICK_TXBD)) {
15256 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
15257 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
15258 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
15259 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
15262 if (tg3_flag(tp, 5705_PLUS)) {
15263 ec->rx_coalesce_usecs_irq = 0;
15264 ec->tx_coalesce_usecs_irq = 0;
15265 ec->stats_block_coalesce_usecs = 0;
15269 static const struct net_device_ops tg3_netdev_ops = {
15270 .ndo_open = tg3_open,
15271 .ndo_stop = tg3_close,
15272 .ndo_start_xmit = tg3_start_xmit,
15273 .ndo_get_stats64 = tg3_get_stats64,
15274 .ndo_validate_addr = eth_validate_addr,
15275 .ndo_set_rx_mode = tg3_set_rx_mode,
15276 .ndo_set_mac_address = tg3_set_mac_addr,
15277 .ndo_do_ioctl = tg3_ioctl,
15278 .ndo_tx_timeout = tg3_tx_timeout,
15279 .ndo_change_mtu = tg3_change_mtu,
15280 .ndo_fix_features = tg3_fix_features,
15281 .ndo_set_features = tg3_set_features,
15282 #ifdef CONFIG_NET_POLL_CONTROLLER
15283 .ndo_poll_controller = tg3_poll_controller,
15287 static int __devinit tg3_init_one(struct pci_dev *pdev,
15288 const struct pci_device_id *ent)
15290 struct net_device *dev;
15292 int i, err, pm_cap;
15293 u32 sndmbx, rcvmbx, intmbx;
15295 u64 dma_mask, persist_dma_mask;
15298 printk_once(KERN_INFO "%s\n", version);
15300 err = pci_enable_device(pdev);
15302 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
15306 err = pci_request_regions(pdev, DRV_MODULE_NAME);
15308 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
15309 goto err_out_disable_pdev;
15312 pci_set_master(pdev);
15314 /* Find power-management capability. */
15315 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
15317 dev_err(&pdev->dev,
15318 "Cannot find Power Management capability, aborting\n");
15320 goto err_out_free_res;
15323 err = pci_set_power_state(pdev, PCI_D0);
15325 dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
15326 goto err_out_free_res;
15329 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
15331 dev_err(&pdev->dev, "Etherdev alloc failed, aborting\n");
15333 goto err_out_power_down;
15336 SET_NETDEV_DEV(dev, &pdev->dev);
15338 tp = netdev_priv(dev);
15341 tp->pm_cap = pm_cap;
15342 tp->rx_mode = TG3_DEF_RX_MODE;
15343 tp->tx_mode = TG3_DEF_TX_MODE;
15346 tp->msg_enable = tg3_debug;
15348 tp->msg_enable = TG3_DEF_MSG_ENABLE;
15350 /* The word/byte swap controls here control register access byte
15351 * swapping. DMA data byte swapping is controlled in the GRC_MODE
15354 tp->misc_host_ctrl =
15355 MISC_HOST_CTRL_MASK_PCI_INT |
15356 MISC_HOST_CTRL_WORD_SWAP |
15357 MISC_HOST_CTRL_INDIR_ACCESS |
15358 MISC_HOST_CTRL_PCISTATE_RW;
15360 /* The NONFRM (non-frame) byte/word swap controls take effect
15361 * on descriptor entries, anything which isn't packet data.
15363 * The StrongARM chips on the board (one for tx, one for rx)
15364 * are running in big-endian mode.
15366 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
15367 GRC_MODE_WSWAP_NONFRM_DATA);
15368 #ifdef __BIG_ENDIAN
15369 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
15371 spin_lock_init(&tp->lock);
15372 spin_lock_init(&tp->indirect_lock);
15373 INIT_WORK(&tp->reset_task, tg3_reset_task);
15375 tp->regs = pci_ioremap_bar(pdev, BAR_0);
15377 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
15379 goto err_out_free_dev;
15382 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
15383 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
15384 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
15385 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
15386 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15387 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
15388 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
15389 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) {
15390 tg3_flag_set(tp, ENABLE_APE);
15391 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
15392 if (!tp->aperegs) {
15393 dev_err(&pdev->dev,
15394 "Cannot map APE registers, aborting\n");
15396 goto err_out_iounmap;
15400 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
15401 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
15403 dev->ethtool_ops = &tg3_ethtool_ops;
15404 dev->watchdog_timeo = TG3_TX_TIMEOUT;
15405 dev->netdev_ops = &tg3_netdev_ops;
15406 dev->irq = pdev->irq;
15408 err = tg3_get_invariants(tp);
15410 dev_err(&pdev->dev,
15411 "Problem fetching invariants of chip, aborting\n");
15412 goto err_out_apeunmap;
15415 /* The EPB bridge inside 5714, 5715, and 5780 and any
15416 * device behind the EPB cannot support DMA addresses > 40-bit.
15417 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
15418 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
15419 * do DMA address check in tg3_start_xmit().
15421 if (tg3_flag(tp, IS_5788))
15422 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
15423 else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
15424 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
15425 #ifdef CONFIG_HIGHMEM
15426 dma_mask = DMA_BIT_MASK(64);
15429 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
15431 /* Configure DMA attributes. */
15432 if (dma_mask > DMA_BIT_MASK(32)) {
15433 err = pci_set_dma_mask(pdev, dma_mask);
15435 features |= NETIF_F_HIGHDMA;
15436 err = pci_set_consistent_dma_mask(pdev,
15439 dev_err(&pdev->dev, "Unable to obtain 64 bit "
15440 "DMA for consistent allocations\n");
15441 goto err_out_apeunmap;
15445 if (err || dma_mask == DMA_BIT_MASK(32)) {
15446 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
15448 dev_err(&pdev->dev,
15449 "No usable DMA configuration, aborting\n");
15450 goto err_out_apeunmap;
15454 tg3_init_bufmgr_config(tp);
15456 features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
15458 /* 5700 B0 chips do not support checksumming correctly due
15459 * to hardware bugs.
15461 if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
15462 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
15464 if (tg3_flag(tp, 5755_PLUS))
15465 features |= NETIF_F_IPV6_CSUM;
15468 /* TSO is on by default on chips that support hardware TSO.
15469 * Firmware TSO on older chips gives lower performance, so it
15470 * is off by default, but can be enabled using ethtool.
15472 if ((tg3_flag(tp, HW_TSO_1) ||
15473 tg3_flag(tp, HW_TSO_2) ||
15474 tg3_flag(tp, HW_TSO_3)) &&
15475 (features & NETIF_F_IP_CSUM))
15476 features |= NETIF_F_TSO;
15477 if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
15478 if (features & NETIF_F_IPV6_CSUM)
15479 features |= NETIF_F_TSO6;
15480 if (tg3_flag(tp, HW_TSO_3) ||
15481 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
15482 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
15483 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
15484 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
15485 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
15486 features |= NETIF_F_TSO_ECN;
15489 dev->features |= features;
15490 dev->vlan_features |= features;
15493 * Add loopback capability only for a subset of devices that support
15494 * MAC-LOOPBACK. Eventually this need to be enhanced to allow INT-PHY
15495 * loopback for the remaining devices.
15497 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
15498 !tg3_flag(tp, CPMU_PRESENT))
15499 /* Add the loopback capability */
15500 features |= NETIF_F_LOOPBACK;
15502 dev->hw_features |= features;
15504 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
15505 !tg3_flag(tp, TSO_CAPABLE) &&
15506 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
15507 tg3_flag_set(tp, MAX_RXPEND_64);
15508 tp->rx_pending = 63;
15511 err = tg3_get_device_address(tp);
15513 dev_err(&pdev->dev,
15514 "Could not obtain valid ethernet address, aborting\n");
15515 goto err_out_apeunmap;
15519 * Reset chip in case UNDI or EFI driver did not shutdown
15520 * DMA self test will enable WDMAC and we'll see (spurious)
15521 * pending DMA on the PCI bus at that point.
15523 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
15524 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
15525 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
15526 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15529 err = tg3_test_dma(tp);
15531 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
15532 goto err_out_apeunmap;
15535 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
15536 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
15537 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
15538 for (i = 0; i < tp->irq_max; i++) {
15539 struct tg3_napi *tnapi = &tp->napi[i];
15542 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
15544 tnapi->int_mbox = intmbx;
15550 tnapi->consmbox = rcvmbx;
15551 tnapi->prodmbox = sndmbx;
15554 tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
15556 tnapi->coal_now = HOSTCC_MODE_NOW;
15558 if (!tg3_flag(tp, SUPPORT_MSIX))
15562 * If we support MSIX, we'll be using RSS. If we're using
15563 * RSS, the first vector only handles link interrupts and the
15564 * remaining vectors handle rx and tx interrupts. Reuse the
15565 * mailbox values for the next iteration. The values we setup
15566 * above are still useful for the single vectored mode.
15581 pci_set_drvdata(pdev, dev);
15583 if (tg3_flag(tp, 5717_PLUS)) {
15584 /* Resume a low-power mode */
15585 tg3_frob_aux_power(tp, false);
15588 err = register_netdev(dev);
15590 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
15591 goto err_out_apeunmap;
15594 netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
15595 tp->board_part_number,
15596 tp->pci_chip_rev_id,
15597 tg3_bus_string(tp, str),
15600 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
15601 struct phy_device *phydev;
15602 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
15604 "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
15605 phydev->drv->name, dev_name(&phydev->dev));
15609 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
15610 ethtype = "10/100Base-TX";
15611 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
15612 ethtype = "1000Base-SX";
15614 ethtype = "10/100/1000Base-T";
15616 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
15617 "(WireSpeed[%d], EEE[%d])\n",
15618 tg3_phy_string(tp), ethtype,
15619 (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
15620 (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
15623 netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
15624 (dev->features & NETIF_F_RXCSUM) != 0,
15625 tg3_flag(tp, USE_LINKCHG_REG) != 0,
15626 (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
15627 tg3_flag(tp, ENABLE_ASF) != 0,
15628 tg3_flag(tp, TSO_CAPABLE) != 0);
15629 netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
15631 pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
15632 ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
15634 pci_save_state(pdev);
15640 iounmap(tp->aperegs);
15641 tp->aperegs = NULL;
15653 err_out_power_down:
15654 pci_set_power_state(pdev, PCI_D3hot);
15657 pci_release_regions(pdev);
15659 err_out_disable_pdev:
15660 pci_disable_device(pdev);
15661 pci_set_drvdata(pdev, NULL);
15665 static void __devexit tg3_remove_one(struct pci_dev *pdev)
15667 struct net_device *dev = pci_get_drvdata(pdev);
15670 struct tg3 *tp = netdev_priv(dev);
15673 release_firmware(tp->fw);
15675 cancel_work_sync(&tp->reset_task);
15677 if (!tg3_flag(tp, USE_PHYLIB)) {
15682 unregister_netdev(dev);
15684 iounmap(tp->aperegs);
15685 tp->aperegs = NULL;
15692 pci_release_regions(pdev);
15693 pci_disable_device(pdev);
15694 pci_set_drvdata(pdev, NULL);
15698 #ifdef CONFIG_PM_SLEEP
15699 static int tg3_suspend(struct device *device)
15701 struct pci_dev *pdev = to_pci_dev(device);
15702 struct net_device *dev = pci_get_drvdata(pdev);
15703 struct tg3 *tp = netdev_priv(dev);
15706 if (!netif_running(dev))
15709 flush_work_sync(&tp->reset_task);
15711 tg3_netif_stop(tp);
15713 del_timer_sync(&tp->timer);
15715 tg3_full_lock(tp, 1);
15716 tg3_disable_ints(tp);
15717 tg3_full_unlock(tp);
15719 netif_device_detach(dev);
15721 tg3_full_lock(tp, 0);
15722 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15723 tg3_flag_clear(tp, INIT_COMPLETE);
15724 tg3_full_unlock(tp);
15726 err = tg3_power_down_prepare(tp);
15730 tg3_full_lock(tp, 0);
15732 tg3_flag_set(tp, INIT_COMPLETE);
15733 err2 = tg3_restart_hw(tp, 1);
15737 tp->timer.expires = jiffies + tp->timer_offset;
15738 add_timer(&tp->timer);
15740 netif_device_attach(dev);
15741 tg3_netif_start(tp);
15744 tg3_full_unlock(tp);
15753 static int tg3_resume(struct device *device)
15755 struct pci_dev *pdev = to_pci_dev(device);
15756 struct net_device *dev = pci_get_drvdata(pdev);
15757 struct tg3 *tp = netdev_priv(dev);
15760 if (!netif_running(dev))
15763 netif_device_attach(dev);
15765 tg3_full_lock(tp, 0);
15767 tg3_flag_set(tp, INIT_COMPLETE);
15768 err = tg3_restart_hw(tp, 1);
15772 tp->timer.expires = jiffies + tp->timer_offset;
15773 add_timer(&tp->timer);
15775 tg3_netif_start(tp);
15778 tg3_full_unlock(tp);
15786 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
15787 #define TG3_PM_OPS (&tg3_pm_ops)
15791 #define TG3_PM_OPS NULL
15793 #endif /* CONFIG_PM_SLEEP */
15796 * tg3_io_error_detected - called when PCI error is detected
15797 * @pdev: Pointer to PCI device
15798 * @state: The current pci connection state
15800 * This function is called after a PCI bus error affecting
15801 * this device has been detected.
15803 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
15804 pci_channel_state_t state)
15806 struct net_device *netdev = pci_get_drvdata(pdev);
15807 struct tg3 *tp = netdev_priv(netdev);
15808 pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
15810 netdev_info(netdev, "PCI I/O error detected\n");
15814 if (!netif_running(netdev))
15819 tg3_netif_stop(tp);
15821 del_timer_sync(&tp->timer);
15822 tg3_flag_clear(tp, RESTART_TIMER);
15824 /* Want to make sure that the reset task doesn't run */
15825 cancel_work_sync(&tp->reset_task);
15826 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
15827 tg3_flag_clear(tp, RESTART_TIMER);
15829 netif_device_detach(netdev);
15831 /* Clean up software state, even if MMIO is blocked */
15832 tg3_full_lock(tp, 0);
15833 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
15834 tg3_full_unlock(tp);
15837 if (state == pci_channel_io_perm_failure)
15838 err = PCI_ERS_RESULT_DISCONNECT;
15840 pci_disable_device(pdev);
15848 * tg3_io_slot_reset - called after the pci bus has been reset.
15849 * @pdev: Pointer to PCI device
15851 * Restart the card from scratch, as if from a cold-boot.
15852 * At this point, the card has exprienced a hard reset,
15853 * followed by fixups by BIOS, and has its config space
15854 * set up identically to what it was at cold boot.
15856 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
15858 struct net_device *netdev = pci_get_drvdata(pdev);
15859 struct tg3 *tp = netdev_priv(netdev);
15860 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
15865 if (pci_enable_device(pdev)) {
15866 netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
15870 pci_set_master(pdev);
15871 pci_restore_state(pdev);
15872 pci_save_state(pdev);
15874 if (!netif_running(netdev)) {
15875 rc = PCI_ERS_RESULT_RECOVERED;
15879 err = tg3_power_up(tp);
15883 rc = PCI_ERS_RESULT_RECOVERED;
15892 * tg3_io_resume - called when traffic can start flowing again.
15893 * @pdev: Pointer to PCI device
15895 * This callback is called when the error recovery driver tells
15896 * us that its OK to resume normal operation.
15898 static void tg3_io_resume(struct pci_dev *pdev)
15900 struct net_device *netdev = pci_get_drvdata(pdev);
15901 struct tg3 *tp = netdev_priv(netdev);
15906 if (!netif_running(netdev))
15909 tg3_full_lock(tp, 0);
15910 tg3_flag_set(tp, INIT_COMPLETE);
15911 err = tg3_restart_hw(tp, 1);
15912 tg3_full_unlock(tp);
15914 netdev_err(netdev, "Cannot restart hardware after reset.\n");
15918 netif_device_attach(netdev);
15920 tp->timer.expires = jiffies + tp->timer_offset;
15921 add_timer(&tp->timer);
15923 tg3_netif_start(tp);
15931 static struct pci_error_handlers tg3_err_handler = {
15932 .error_detected = tg3_io_error_detected,
15933 .slot_reset = tg3_io_slot_reset,
15934 .resume = tg3_io_resume
15937 static struct pci_driver tg3_driver = {
15938 .name = DRV_MODULE_NAME,
15939 .id_table = tg3_pci_tbl,
15940 .probe = tg3_init_one,
15941 .remove = __devexit_p(tg3_remove_one),
15942 .err_handler = &tg3_err_handler,
15943 .driver.pm = TG3_PM_OPS,
15946 static int __init tg3_init(void)
15948 return pci_register_driver(&tg3_driver);
15951 static void __exit tg3_cleanup(void)
15953 pci_unregister_driver(&tg3_driver);
15956 module_init(tg3_init);
15957 module_exit(tg3_cleanup);