/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2011 Broadcom Corporation.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>

#include <net/checksum.h>

#include <asm/system.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#include "tg3.h"

#define DRV_MODULE_NAME		"tg3"
#define TG3_MAJ_NUM			3
#define TG3_MIN_NUM			117
#define DRV_MODULE_VERSION	\
	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE	"January 25, 2011"

#define TG3_DEF_MAC_MODE	0
#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
	((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) ? 9000 : 1500)

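/* For example (restating the macro above): a chip with
 * TG3_FLAG_JUMBO_CAPABLE set accepts a data payload of up to 9000
 * bytes, while all other chips are limited to the standard 1500-byte
 * Ethernet payload.
 */
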
/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
	((tp->tg3_flags3 & TG3_FLG3_LRG_PROD_RING_CAP) ? \
	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JMB_RING_SIZE(tp) \
	((tp->tg3_flags3 & TG3_FLG3_LRG_PROD_RING_CAP) ? \
	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING	100
#define TG3_RSS_INDIR_TBL_SIZE		128

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */
#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))

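/* Worked example (illustrative): because TG3_TX_RING_SIZE is a power of
 * two, the mask above is an exact modulo, e.g. with 512 entries:
 *
 *	NEXT_TX(510) == 511
 *	NEXT_TX(511) == 0	since (511 + 1) & 511 == 512 % 512
 *
 * which is precisely the '& (foo - 1)' trick described in the comment
 * preceding TG3_TX_RING_SIZE.
 */
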
#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

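/* Worked example (arithmetic only): a standard RX buffer is mapped as
 * TG3_RX_STD_DMA_SZ + TG3_DMA_BYTE_ENAB = 1536 + 64 = 1600 bytes, and a
 * jumbo buffer as 9046 + 64 = 9110 bytes, so the DMA byte-enable slack
 * is always included in the mapping size.
 */
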
#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)	((tnapi)->tx_pending / 4)

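/* Example (derived from the defaults above): with tx_pending at the
 * default TG3_DEF_TX_RING_PENDING of 511, a stopped TX queue is woken
 * once at least 511 / 4 = 127 descriptors are free again.
 */
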
#define TG3_RAW_IP_ALIGN 2

#define TG3_FW_UPDATE_TIMEOUT_SEC	5

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"

static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

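/* Usage note (illustrative): the bitmap follows the standard NETIF_MSG_*
 * values from <linux/netdevice.h>, so e.g. "modprobe tg3 tg3_debug=0x7fff"
 * enables every message class, while the default of -1 leaves the driver
 * with TG3_DEF_MSG_ENABLE.
 */
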
static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_flow_control" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "mbuf_lwm_thresh_hit" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" }
};

#define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	{ "nvram test     (online) " },
	{ "link test      (online) " },
	{ "register test  (offline)" },
	{ "memory test    (offline)" },
	{ "loopback test  (offline)" },
	{ "interrupt test (offline)" },
};

#define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)

static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

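/* The readback in the helper below flushes the write: PCI writes are
 * posted, so the readl() forces the preceding writel() all the way out
 * to the device before the caller proceeds.
 */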
static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
	    (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
	    !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
		writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
		readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)

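/* Illustrative sketch (hypothetical, condensed from the probe path): the
 * tw32()/tr32() macros dispatch through per-device function pointers so
 * that chips with register access bugs can be pointed at the indirect,
 * non-posted accessors once at probe time, e.g.:
 *
 *	tp->write32 = tg3_write32;
 *	tp->read32  = tg3_read32;
 *	if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
 *		tp->write32 = tg3_write_indirect_reg32;
 *		tp->read32  = tg3_read_indirect_reg32;
 *	}
 *
 * After that, every tw32(reg, val) in the driver transparently takes the
 * safe path on the affected hardware.
 */
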
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

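/* Usage sketch (illustrative): firmware mailboxes live in NIC SRAM and
 * are reached through the 32-bit memory window above, e.g.:
 *
 *	u32 cmd;
 *	tg3_read_mem(tp, NIC_SRAM_FW_CMD_MBOX, &cmd);
 *
 * The window base is always restored to zero afterwards so that stray
 * accesses cannot land inside NIC memory.
 */
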
static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver hasn't any stale locks. */
	for (i = 0; i < 8; i++)
		tg3_ape_write32(tp, regbase + 4 * i, APE_LOCK_GRANT_DRIVER);
}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt;

	if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		break;
	default:
		return -EINVAL;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, APE_LOCK_REQ_DRIVER);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == APE_LOCK_GRANT_DRIVER)
			break;
		udelay(10);
	}

	if (status != APE_LOCK_GRANT_DRIVER) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off,
				APE_LOCK_GRANT_DRIVER);

		ret = -EBUSY;
	}

	return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt;

	if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		break;
	default:
		return;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	tg3_ape_write32(tp, gnt + 4 * locknum, APE_LOCK_GRANT_DRIVER);
}

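/* Typical pairing (illustrative sketch): callers bracket accesses to
 * resources shared with the APE management firmware, e.g.:
 *
 *	if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM) == 0) {
 *		... touch memory shared with the APE ...
 *		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
 *	}
 *
 * On chips without the APE (TG3_FLG3_ENABLE_APE clear) both calls are
 * no-ops, so the pairing is safe everywhere.
 */
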
static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tp->tg3_flags &
	      (TG3_FLAG_USE_LINKCHG_REG |
	       TG3_FLAG_POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}
	/* check for RX/TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}

/* tg3_int_reenable
 * similar to tg3_enable_ints, but it accurately determines whether there
 * is new work pending and can return without flushing the PIO write
 * which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if ((tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS	5000

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	*val = 0x0;

	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}

static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}

static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = bp->priv;
	u32 val;

	spin_lock_bh(&tp->lock);

	if (tg3_readphy(tp, reg, &val))
		val = -EIO;

	spin_unlock_bh(&tp->lock);

	return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = bp->priv;
	u32 ret = 0;

	spin_lock_bh(&tp->lock);

	if (tg3_writephy(tp, reg, val))
		ret = -EIO;

	spin_unlock_bh(&tp->lock);

	return ret;
}

static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}

static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_INBAND_DISABLE)) {
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_INBAND_DISABLE)) {
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if ((tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}

static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) {
		u32 is_serdes;

		tp->phy_addr = PCI_FUNC(tp->pdev->devfn) + 1;

		if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) ||
	    (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->reset    = &tg3_mdio_reset;
	tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
	tp->mdio_bus->irq      = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state..
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_INBAND_DISABLE)
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fallthrough */
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_INITED;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}

static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
		tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_INITED;
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
	}
}

static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		udelay(8);
	}
}

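/* Worked example for the loop above: with the full 2500 usec budget
 * remaining, delay_cnt = (2500 >> 3) + 1 = 313 polls of roughly 8 usec
 * each, i.e. the loop still waits about TG3_FW_EVENT_TIMEOUT_USEC in
 * total while checking the event bit eight times more often than a
 * single blind delay would.
 */
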
/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 reg;
	u32 val;

	if (!(tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		return;

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);

	val = 0;
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);

	tg3_generate_fw_event(tp);
}

static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		netif_info(tp, link, tp->dev, "Link is down\n");
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
			    (tp->link_config.active_speed == SPEED_1000 ?
			     1000 :
			     (tp->link_config.active_speed == SPEED_100 ?
			      100 : 10)),
			    (tp->link_config.active_duplex == DUPLEX_FULL ?
			     "full" : "half"));

		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
			    "on" : "off",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
			    "on" : "off");
		tg3_ump_link_report(tp);
	}
}

static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_PAUSE_CAP;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_PAUSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
	else
		miireg = 0;

	return miireg;
}

static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_1000XPAUSE;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_1000XPSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
	else
		miireg = 0;

	return miireg;
}

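/* Summary of the two advertisement helpers above (per IEEE 802.3
 * Annex 28B pause resolution):
 *
 *	TX and RX	-> symmetric PAUSE only
 *	TX only		-> ASYM_PAUSE only
 *	RX only		-> PAUSE | ASYM_PAUSE
 *	neither		-> advertise nothing
 *
 * The 1000T variant uses the copper MII bits, the 1000X variant the
 * equivalent bits for 1000BASE-X autonegotiation.
 */
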
static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
{
	u8 cap = 0;

	if (lcladv & ADVERTISE_1000XPAUSE) {
		if (lcladv & ADVERTISE_1000XPSE_ASYM) {
			if (rmtadv & LPA_1000XPAUSE)
				cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
			else if (rmtadv & LPA_1000XPAUSE_ASYM)
				cap = FLOW_CTRL_RX;
		} else {
			if (rmtadv & LPA_1000XPAUSE)
				cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
		}
	} else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
		if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
			cap = FLOW_CTRL_TX;
	}

	return cap;
}

static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
	u8 autoneg;
	u8 flowctrl = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
		autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
	else
		autoneg = tp->link_config.autoneg;

	if (autoneg == AUTONEG_ENABLE &&
	    (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)) {
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
		else
			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	} else
		flowctrl = tp->link_config.flowctrl;

	tp->link_config.active_flowctrl = flowctrl;

	if (flowctrl & FLOW_CTRL_RX)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	if (old_rx_mode != tp->rx_mode)
		tw32_f(MAC_RX_MODE, tp->rx_mode);

	if (flowctrl & FLOW_CTRL_TX)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode)
		tw32_f(MAC_TX_MODE, tp->tx_mode);
}

static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	spin_lock_bh(&tp->lock);

	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else if (phydev->speed == SPEED_1000 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_MII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			lcl_adv = tg3_advert_flowctrl_1000T(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		if (phydev->speed == SPEED_10)
			tw32(MAC_MI_STAT,
			     MAC_MI_STAT_10MBPS_MODE |
			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
		else
			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	}

	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
	    (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock_bh(&tp->lock);

	if (linkmesg)
		tg3_link_report(tp);
}

static int tg3_phy_init(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
		return 0;

	/* Bring the PHY back to a known state. */
	tg3_bmcr_reset(tp);

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	/* Attach the MAC to the PHY. */
	phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
			     phydev->dev_flags, phydev->interface);
	if (IS_ERR(phydev)) {
		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	/* Mask with MAC supported features. */
	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
			phydev->supported &= (PHY_GBIT_FEATURES |
					      SUPPORTED_Pause |
					      SUPPORTED_Asym_Pause);
			break;
		}
		/* fallthru */
	case PHY_INTERFACE_MODE_MII:
		phydev->supported &= (PHY_BASIC_FEATURES |
				      SUPPORTED_Pause |
				      SUPPORTED_Asym_Pause);
		break;
	default:
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		return -EINVAL;
	}

	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;

	phydev->advertising = phydev->supported;

	return 0;
}

static void tg3_phy_start(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
		tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
		phydev->speed = tp->link_config.orig_speed;
		phydev->duplex = tp->link_config.orig_duplex;
		phydev->autoneg = tp->link_config.orig_autoneg;
		phydev->advertising = tp->link_config.orig_advertising;
	}

	phy_start(phydev);

	phy_start_aneg(phydev);
}

static void tg3_phy_stop(struct tg3 *tp)
{
	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
}

static void tg3_phy_fini(struct tg3 *tp)
{
	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
	}
}

static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 phytest;

	if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
		u32 phy;

		tg3_writephy(tp, MII_TG3_FET_TEST,
			     phytest | MII_TG3_FET_SHADOW_EN);
		if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
			if (enable)
				phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
			else
				phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
			tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
		}
		tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
	}
}

static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 reg;

	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
	    ((tp->tg3_flags3 & TG3_FLG3_5717_PLUS) &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		tg3_phy_fet_toggle_apd(tp, enable);
		return;
	}

	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_SCR5_SEL |
	      MII_TG3_MISC_SHDW_SCR5_LPED |
	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
	      MII_TG3_MISC_SHDW_SCR5_SDTL |
	      MII_TG3_MISC_SHDW_SCR5_C125OE;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);


	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_APD_SEL |
	      MII_TG3_MISC_SHDW_APD_WKTM_84MS;
	if (enable)
		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
}

static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
{
	u32 phy;

	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 ephy;

		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;

			tg3_writephy(tp, MII_TG3_FET_TEST,
				     ephy | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, reg, &phy)) {
				if (enable)
					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				else
					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				tg3_writephy(tp, reg, phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
		}
	} else {
		phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC |
		      MII_TG3_AUXCTL_SHDWSEL_MISC;
		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) &&
		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) {
			if (enable)
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			else
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			phy |= MII_TG3_AUXCTL_MISC_WREN;
			tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
		}
	}
}

static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
		return;

	if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
	    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
		tg3_writephy(tp, MII_TG3_AUX_CTRL,
			     (val | (1 << 15) | (1 << 4)));
}

static void tg3_phy_apply_otp(struct tg3 *tp)
{
	u32 otp, phy;

	if (!tp->phy_otp)
		return;

	otp = tp->phy_otp;

	/* Enable SM_DSP clock and tx 6dB coding. */
	phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
	      MII_TG3_AUXCTL_ACTL_SMDSP_ENA |
	      MII_TG3_AUXCTL_ACTL_TX_6DB;
	tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);

	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);

	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);

	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);

	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);

	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);

	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

	/* Turn off SM_DSP clock. */
	phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
	      MII_TG3_AUXCTL_ACTL_TX_6DB;
	tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
}

static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
{
	u32 val;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		return;

	tp->setlpicnt = 0;

	if (tp->link_config.autoneg == AUTONEG_ENABLE &&
	    current_link_up == 1 &&
	    tp->link_config.active_duplex == DUPLEX_FULL &&
	    (tp->link_config.active_speed == SPEED_100 ||
	     tp->link_config.active_speed == SPEED_1000)) {
		u32 eeectl;

		if (tp->link_config.active_speed == SPEED_1000)
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
		else
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;

		tw32(TG3_CPMU_EEE_CTRL, eeectl);

		tg3_phy_cl45_read(tp, MDIO_MMD_AN,
				  TG3_CL45_D7_EEERES_STAT, &val);

		switch (val) {
		case TG3_CL45_D7_EEERES_STAT_LP_1000T:
			switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
			case ASIC_REV_5717:
			case ASIC_REV_5719:
			case ASIC_REV_57765:
				/* Enable SM_DSP clock and tx 6dB coding. */
				val = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
				      MII_TG3_AUXCTL_ACTL_SMDSP_ENA |
				      MII_TG3_AUXCTL_ACTL_TX_6DB;
				tg3_writephy(tp, MII_TG3_AUX_CTRL, val);

				tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);

				/* Turn off SM_DSP clock. */
				val = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
				      MII_TG3_AUXCTL_ACTL_TX_6DB;
				tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
			}
			/* Fallthrough */
		case TG3_CL45_D7_EEERES_STAT_LP_100TX:
			tp->setlpicnt = 2;
		}
	}

	if (!tp->setlpicnt) {
		val = tr32(TG3_CPMU_EEE_MODE);
		tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
	}
}

static int tg3_wait_macro_done(struct tg3 *tp)
{
	int limit = 100;

	while (limit--) {
		u32 tmp32;

		if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
			if ((tmp32 & 0x1000) == 0)
				break;
		}
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}

static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}

static int tg3_phy_reset_chanpat(struct tg3 *tp)
{
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp))
			return -EBUSY;
	}

	return 0;
}

static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
	u32 reg32, phy9_orig;
	int retries, do_phy_reset, err;

	retries = 10;
	do_phy_reset = 1;
	do {
		if (do_phy_reset) {
			err = tg3_bmcr_reset(tp);
			if (err)
				return err;
			do_phy_reset = 0;
		}

		/* Disable transmitter and interrupt.  */
		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
			continue;

		reg32 |= 0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

		/* Set full-duplex, 1000 mbps.  */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_FULLDPLX | TG3_BMCR_SPEED1000);

		/* Set to master mode.  */
		if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
			continue;

		tg3_writephy(tp, MII_TG3_CTRL,
			     (MII_TG3_CTRL_AS_MASTER |
			      MII_TG3_CTRL_ENABLE_AS_MASTER));

		/* Enable SM_DSP_CLOCK and 6dB.  */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);

		/* Block the PHY control access.  */
		tg3_phydsp_write(tp, 0x8005, 0x0800);

		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
		if (!err)
			break;
	} while (--retries);

	err = tg3_phy_reset_chanpat(tp);
	if (err)
		return err;

	tg3_phydsp_write(tp, 0x8005, 0x0000);

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
	tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
		/* Set Extended packet length bit for jumbo frames */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
	} else {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}

	tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);

	if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
		reg32 &= ~0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
	} else if (!err)
		err = -EBUSY;

	return err;
}

/* This will reset the tigon3 PHY if there is no valid
 * link unless the FORCE argument is non-zero.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 val, cpmuctrl;
	int err;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
	}
	err  = tg3_readphy(tp, MII_BMSR, &val);
	err |= tg3_readphy(tp, MII_BMSR, &val);
	if (err != 0)
		return -EBUSY;

	if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
		netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	cpmuctrl = 0;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
		cpmuctrl = tr32(TG3_CPMU_CTRL);
		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
			tw32(TG3_CPMU_CTRL,
			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
		val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);

		tw32(TG3_CPMU_CTRL, cpmuctrl);
	}

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
		    CPMU_LSPD_1000MB_MACCLK_12_5) {
			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
			udelay(40);
			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
		}
	}

	if ((tp->tg3_flags3 & TG3_FLG3_5717_PLUS) &&
	    (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
		return 0;

	tg3_phy_apply_otp(tp);

	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
		tg3_phy_toggle_apd(tp, true);
	else
		tg3_phy_toggle_apd(tp, false);

out:
	if (tp->phy_flags & TG3_PHYFLG_ADC_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_phydsp_write(tp, 0x201f, 0x2aaa);
		tg3_phydsp_write(tp, 0x000a, 0x0323);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
	}
	if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_phydsp_write(tp, 0x000a, 0x310b);
		tg3_phydsp_write(tp, 0x201f, 0x9506);
		tg3_phydsp_write(tp, 0x401f, 0x14e2);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	} else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
			tg3_writephy(tp, MII_TG3_TEST1,
				     MII_TG3_TEST1_TRIM_EN | 0x4);
		} else
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	/* Set Extended packet length bit (bit 14) on all chips that */
	/* support jumbo frames */
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
	} else if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
		/* Set bit 14 with read-modify-write to preserve other bits */
		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
			tg3_writephy(tp, MII_TG3_AUX_CTRL, val | 0x4000);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* adjust output voltage */
		tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
	}

	tg3_phy_toggle_automdix(tp, 1);
	tg3_phy_set_wirespeed(tp);
	return 0;
}

2119 static void tg3_frob_aux_power(struct tg3 *tp)
2121 bool need_vaux = false;
2123 /* The GPIOs do something completely different on 57765. */
2124 if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0 ||
2125 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2126 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
2129 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2130 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
2131 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2132 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) &&
2133 tp->pdev_peer != tp->pdev) {
2134 struct net_device *dev_peer;
2136 dev_peer = pci_get_drvdata(tp->pdev_peer);
2138 /* remove_one() may have been run on the peer. */
2140 struct tg3 *tp_peer = netdev_priv(dev_peer);
2142 if (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE)
2145 if ((tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) ||
2146 (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF))
2151 if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) ||
2152 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
2156 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2157 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2158 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2159 (GRC_LCLCTRL_GPIO_OE0 |
2160 GRC_LCLCTRL_GPIO_OE1 |
2161 GRC_LCLCTRL_GPIO_OE2 |
2162 GRC_LCLCTRL_GPIO_OUTPUT0 |
2163 GRC_LCLCTRL_GPIO_OUTPUT1),
2165 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2166 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2167 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2168 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2169 GRC_LCLCTRL_GPIO_OE1 |
2170 GRC_LCLCTRL_GPIO_OE2 |
2171 GRC_LCLCTRL_GPIO_OUTPUT0 |
2172 GRC_LCLCTRL_GPIO_OUTPUT1 |
2174 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
2176 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2177 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
2179 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2180 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
2183 u32 grc_local_ctrl = 0;
2185 /* Workaround to prevent overdrawing Amps. */
2186 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2188 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2189 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2190 grc_local_ctrl, 100);
2193 /* On 5753 and variants, GPIO2 cannot be used. */
2194 no_gpio2 = tp->nic_sram_data_cfg &
2195 NIC_SRAM_DATA_CFG_NO_GPIO2;
2197 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2198 GRC_LCLCTRL_GPIO_OE1 |
2199 GRC_LCLCTRL_GPIO_OE2 |
2200 GRC_LCLCTRL_GPIO_OUTPUT1 |
2201 GRC_LCLCTRL_GPIO_OUTPUT2;
2203 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2204 GRC_LCLCTRL_GPIO_OUTPUT2);
2206 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2207 grc_local_ctrl, 100);
2209 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2211 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2212 grc_local_ctrl, 100);
2215 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2216 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2217 grc_local_ctrl, 100);
2221 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
2222 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
2223 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2224 (GRC_LCLCTRL_GPIO_OE1 |
2225 GRC_LCLCTRL_GPIO_OUTPUT1), 100);
2227 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2228 GRC_LCLCTRL_GPIO_OE1, 100);
2230 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2231 (GRC_LCLCTRL_GPIO_OE1 |
2232 GRC_LCLCTRL_GPIO_OUTPUT1), 100);
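/* tg3_5700_link_polarity() below reports whether the 5700 MAC needs
 * MAC_MODE_LINK_POLARITY set for the given link speed; the BCM5411 PHY
 * and the LED_CTRL_MODE_PHY_2 LED mode want the opposite sense from the
 * default.
 */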
2237 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2238 {
2239 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2240 return 1;
2241 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2242 if (speed != SPEED_10)
2243 return 1;
2244 } else if (speed == SPEED_10)
2245 return 1;
2247 return 0;
2248 }
2250 static int tg3_setup_phy(struct tg3 *, int);
2252 #define RESET_KIND_SHUTDOWN 0
2253 #define RESET_KIND_INIT 1
2254 #define RESET_KIND_SUSPEND 2
2256 static void tg3_write_sig_post_reset(struct tg3 *, int);
2257 static int tg3_halt_cpu(struct tg3 *, u32);
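/* tg3_power_down_phy() below quiesces the PHY before the device enters a
 * low-power state; several families (5906, FET-class, serdes) need
 * chip-specific sequences instead of a plain BMCR_PDOWN write, and a few
 * chips must not power the PHY down at all.
 */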
2259 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2260 {
2261 u32 val;
2263 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2264 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2265 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2266 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2268 sg_dig_ctrl |=
2269 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2270 tw32(SG_DIG_CTRL, sg_dig_ctrl);
2271 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2272 }
2273 return;
2274 }
2276 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2278 val = tr32(GRC_MISC_CFG);
2279 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2282 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2284 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2287 tg3_writephy(tp, MII_ADVERTISE, 0);
2288 tg3_writephy(tp, MII_BMCR,
2289 BMCR_ANENABLE | BMCR_ANRESTART);
2291 tg3_writephy(tp, MII_TG3_FET_TEST,
2292 phytest | MII_TG3_FET_SHADOW_EN);
2293 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2294 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2295 tg3_writephy(tp,
2296 MII_TG3_FET_SHDW_AUXMODE4,
2297 phy);
2299 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2302 } else if (do_low_power) {
2303 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2304 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2306 tg3_writephy(tp, MII_TG3_AUX_CTRL,
2307 MII_TG3_AUXCTL_SHDWSEL_PWRCTL |
2308 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2309 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2310 MII_TG3_AUXCTL_PCTL_VREG_11V);
2313 /* The PHY should not be powered down on some chips because
2314 * of bugs.
2315 */
2316 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2317 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2318 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2319 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2320 return;
2322 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2323 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2324 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2325 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2326 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2327 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2330 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
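/* The NVRAM software-arbitration (SWARB) register used below is shared
 * with the on-chip firmware; tg3_nvram_lock()/tg3_nvram_unlock()
 * implement a counted lock on top of the REQ_SET1/GNT1/REQ_CLR1
 * handshake.
 */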
2333 /* tp->lock is held. */
2334 static int tg3_nvram_lock(struct tg3 *tp)
2336 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
2339 if (tp->nvram_lock_cnt == 0) {
2340 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2341 for (i = 0; i < 8000; i++) {
2342 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2343 break;
2344 udelay(20);
2345 }
2346 if (i == 8000) {
2347 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2348 return -ENODEV;
2349 }
2350 }
2351 tp->nvram_lock_cnt++;
2352 }
2354 return 0;
2355 }
2356 /* tp->lock is held. */
2357 static void tg3_nvram_unlock(struct tg3 *tp)
2359 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
2360 if (tp->nvram_lock_cnt > 0)
2361 tp->nvram_lock_cnt--;
2362 if (tp->nvram_lock_cnt == 0)
2363 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2367 /* tp->lock is held. */
2368 static void tg3_enable_nvram_access(struct tg3 *tp)
2370 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
2371 !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM)) {
2372 u32 nvaccess = tr32(NVRAM_ACCESS);
2374 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2378 /* tp->lock is held. */
2379 static void tg3_disable_nvram_access(struct tg3 *tp)
2381 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
2382 !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM)) {
2383 u32 nvaccess = tr32(NVRAM_ACCESS);
2385 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
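/* tg3_nvram_read_using_eeprom() below is the fallback path for boards
 * without the NVRAM interface: it programs GRC_EEPROM_ADDR with the
 * target word and polls for EEPROM_ADDR_COMPLETE before reading the data
 * register.
 */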
2389 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
2390 u32 offset, u32 *val)
2395 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
2396 return -EINVAL;
2398 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
2399 EEPROM_ADDR_DEVID_MASK |
2400 EEPROM_ADDR_READ);
2401 tw32(GRC_EEPROM_ADDR,
2402 tmp |
2403 (0 << EEPROM_ADDR_DEVID_SHIFT) |
2404 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
2405 EEPROM_ADDR_ADDR_MASK) |
2406 EEPROM_ADDR_READ | EEPROM_ADDR_START);
2408 for (i = 0; i < 1000; i++) {
2409 tmp = tr32(GRC_EEPROM_ADDR);
2411 if (tmp & EEPROM_ADDR_COMPLETE)
2412 break;
2413 msleep(1);
2414 }
2415 if (!(tmp & EEPROM_ADDR_COMPLETE))
2416 return -EBUSY;
2418 tmp = tr32(GRC_EEPROM_DATA);
2420 /*
2421 * The data will always be opposite the native endian
2422 * format. Perform a blind byteswap to compensate.
2423 */
2424 *val = swab32(tmp);
2426 return 0;
2427 }
2429 #define NVRAM_CMD_TIMEOUT 10000
2431 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
2432 {
2433 int i;
2435 tw32(NVRAM_CMD, nvram_cmd);
2436 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
2437 udelay(10);
2438 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
2439 udelay(10);
2440 break;
2441 }
2442 }
2444 if (i == NVRAM_CMD_TIMEOUT)
2445 return -EBUSY;
2447 return 0;
2448 }
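/* tg3_nvram_phys_addr()/tg3_nvram_logical_addr() below translate between
 * linear offsets and the page/byte addressing used by Atmel AT45DB0x1B
 * parts, which have non-power-of-2 264-byte pages. Worked example,
 * assuming nvram_pagesize = 264 and ATMEL_AT45DB0X1B_PAGE_POS = 9:
 * linear offset 1000 is page 3, byte 208, so the physical address is
 * (3 << 9) + 208 = 1744.
 */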
2450 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
2452 if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
2453 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
2454 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
2455 !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
2456 (tp->nvram_jedecnum == JEDEC_ATMEL))
2458 addr = ((addr / tp->nvram_pagesize) <<
2459 ATMEL_AT45DB0X1B_PAGE_POS) +
2460 (addr % tp->nvram_pagesize);
2462 return addr;
2463 }
2465 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
2467 if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
2468 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
2469 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
2470 !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
2471 (tp->nvram_jedecnum == JEDEC_ATMEL))
2473 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
2474 tp->nvram_pagesize) +
2475 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
2477 return addr;
2478 }
2480 /* NOTE: Data read in from NVRAM is byteswapped according to
2481 * the byteswapping settings for all other register accesses.
2482 * tg3 devices are BE devices, so on a BE machine, the data
2483 * returned will be exactly as it is seen in NVRAM. On a LE
2484 * machine, the 32-bit value will be byteswapped.
2485 */
2486 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
2490 if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
2491 return tg3_nvram_read_using_eeprom(tp, offset, val);
2493 offset = tg3_nvram_phys_addr(tp, offset);
2495 if (offset > NVRAM_ADDR_MSK)
2498 ret = tg3_nvram_lock(tp);
2499 if (ret)
2500 return ret;
2502 tg3_enable_nvram_access(tp);
2504 tw32(NVRAM_ADDR, offset);
2505 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
2506 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
2508 if (ret == 0)
2509 *val = tr32(NVRAM_RDDATA);
2511 tg3_disable_nvram_access(tp);
2513 tg3_nvram_unlock(tp);
2515 return ret;
2516 }
2518 /* Ensures NVRAM data is in bytestream format. */
2519 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
2520 {
2521 u32 v;
2522 int res = tg3_nvram_read(tp, offset, &v);
2523 if (!res)
2524 *val = cpu_to_be32(v);
2525 return res;
2526 }
2528 /* tp->lock is held. */
2529 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
2531 u32 addr_high, addr_low;
2534 addr_high = ((tp->dev->dev_addr[0] << 8) |
2535 tp->dev->dev_addr[1]);
2536 addr_low = ((tp->dev->dev_addr[2] << 24) |
2537 (tp->dev->dev_addr[3] << 16) |
2538 (tp->dev->dev_addr[4] << 8) |
2539 (tp->dev->dev_addr[5] << 0));
2540 for (i = 0; i < 4; i++) {
2541 if (i == 1 && skip_mac_1)
2542 continue;
2543 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
2544 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
2547 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2548 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2549 for (i = 0; i < 12; i++) {
2550 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
2551 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
2555 addr_high = (tp->dev->dev_addr[0] +
2556 tp->dev->dev_addr[1] +
2557 tp->dev->dev_addr[2] +
2558 tp->dev->dev_addr[3] +
2559 tp->dev->dev_addr[4] +
2560 tp->dev->dev_addr[5]) &
2561 TX_BACKOFF_SEED_MASK;
2562 tw32(MAC_TX_BACKOFF_SEED, addr_high);
2565 static void tg3_enable_register_access(struct tg3 *tp)
2566 {
2567 /*
2568 * Make sure register accesses (indirect or otherwise) will function
2569 * correctly.
2570 */
2571 pci_write_config_dword(tp->pdev,
2572 TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
2573 }
2575 static int tg3_power_up(struct tg3 *tp)
2576 {
2577 tg3_enable_register_access(tp);
2579 pci_set_power_state(tp->pdev, PCI_D0);
2581 /* Switch out of Vaux if it is a NIC */
2582 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
2583 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
2585 return 0;
2586 }
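/* tg3_power_down_prepare() below is the heavyweight half of suspend: it
 * reprograms the PHY advertisement for Wake-on-LAN, picks a low-power
 * MAC port mode, gates the various core clocks, and finally hands
 * auxiliary-power control back to tg3_frob_aux_power().
 */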
2588 static int tg3_power_down_prepare(struct tg3 *tp)
2589 {
2590 u32 misc_host_ctrl;
2591 bool device_should_wake, do_low_power;
2593 tg3_enable_register_access(tp);
2595 /* Restore the CLKREQ setting. */
2596 if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG) {
2597 u16 lnkctl;
2599 pci_read_config_word(tp->pdev,
2600 tp->pcie_cap + PCI_EXP_LNKCTL,
2601 &lnkctl);
2602 lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
2603 pci_write_config_word(tp->pdev,
2604 tp->pcie_cap + PCI_EXP_LNKCTL,
2605 lnkctl);
2606 }
2608 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
2609 tw32(TG3PCI_MISC_HOST_CTRL,
2610 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
2612 device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
2613 (tp->tg3_flags & TG3_FLAG_WOL_ENABLE);
2615 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
2616 do_low_power = false;
2617 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
2618 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
2619 struct phy_device *phydev;
2620 u32 phyid, advertising;
2622 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2624 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
2626 tp->link_config.orig_speed = phydev->speed;
2627 tp->link_config.orig_duplex = phydev->duplex;
2628 tp->link_config.orig_autoneg = phydev->autoneg;
2629 tp->link_config.orig_advertising = phydev->advertising;
2631 advertising = ADVERTISED_TP |
2632 ADVERTISED_Pause |
2633 ADVERTISED_Autoneg |
2634 ADVERTISED_10baseT_Half;
2636 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
2637 device_should_wake) {
2638 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
2639 advertising |=
2640 ADVERTISED_100baseT_Half |
2641 ADVERTISED_100baseT_Full |
2642 ADVERTISED_10baseT_Full;
2643 else
2644 advertising |= ADVERTISED_10baseT_Full;
2647 phydev->advertising = advertising;
2649 phy_start_aneg(phydev);
2651 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
2652 if (phyid != PHY_ID_BCMAC131) {
2653 phyid &= PHY_BCM_OUI_MASK;
2654 if (phyid == PHY_BCM_OUI_1 ||
2655 phyid == PHY_BCM_OUI_2 ||
2656 phyid == PHY_BCM_OUI_3)
2657 do_low_power = true;
2658 }
2659 }
2660 } else {
2661 do_low_power = true;
2663 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
2664 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
2665 tp->link_config.orig_speed = tp->link_config.speed;
2666 tp->link_config.orig_duplex = tp->link_config.duplex;
2667 tp->link_config.orig_autoneg = tp->link_config.autoneg;
2670 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
2671 tp->link_config.speed = SPEED_10;
2672 tp->link_config.duplex = DUPLEX_HALF;
2673 tp->link_config.autoneg = AUTONEG_ENABLE;
2674 tg3_setup_phy(tp, 0);
2678 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2681 val = tr32(GRC_VCPU_EXT_CTRL);
2682 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
2683 } else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
2687 for (i = 0; i < 200; i++) {
2688 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
2689 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
2690 break;
2691 msleep(1);
2692 }
2693 }
2694 if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
2695 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
2696 WOL_DRV_STATE_SHUTDOWN |
2697 WOL_DRV_WOL |
2698 WOL_SET_MAGIC_PKT);
2700 if (device_should_wake) {
2701 u32 mac_mode;
2703 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
2705 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
2709 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
2710 mac_mode = MAC_MODE_PORT_MODE_GMII;
2711 else
2712 mac_mode = MAC_MODE_PORT_MODE_MII;
2714 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
2715 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2716 ASIC_REV_5700) {
2717 u32 speed = (tp->tg3_flags &
2718 TG3_FLAG_WOL_SPEED_100MB) ?
2719 SPEED_100 : SPEED_10;
2720 if (tg3_5700_link_polarity(tp, speed))
2721 mac_mode |= MAC_MODE_LINK_POLARITY;
2722 else
2723 mac_mode &= ~MAC_MODE_LINK_POLARITY;
2724 }
2725 } else {
2726 mac_mode = MAC_MODE_PORT_MODE_TBI;
2729 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
2730 tw32(MAC_LED_CTRL, tp->led_ctrl);
2732 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
2733 if (((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
2734 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) &&
2735 ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
2736 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)))
2737 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
2739 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
2740 mac_mode |= MAC_MODE_APE_TX_EN |
2741 MAC_MODE_APE_RX_EN |
2742 MAC_MODE_TDE_ENABLE;
2744 tw32_f(MAC_MODE, mac_mode);
2747 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
2751 if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
2752 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2753 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
2756 base_val = tp->pci_clock_ctrl;
2757 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
2758 CLOCK_CTRL_TXCLK_DISABLE);
2760 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
2761 CLOCK_CTRL_PWRDOWN_PLL133, 40);
2762 } else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
2763 (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
2764 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
2765 /* do nothing */
2766 } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
2767 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
2768 u32 newbits1, newbits2;
2770 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2771 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2772 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
2773 CLOCK_CTRL_TXCLK_DISABLE |
2774 CLOCK_CTRL_ALTCLK);
2775 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2776 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
2777 newbits1 = CLOCK_CTRL_625_CORE;
2778 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
2780 newbits1 = CLOCK_CTRL_ALTCLK;
2781 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2784 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
2785 40);
2787 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
2788 40);
2790 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2791 u32 newbits3;
2793 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2794 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2795 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
2796 CLOCK_CTRL_TXCLK_DISABLE |
2797 CLOCK_CTRL_44MHZ_CORE);
2798 } else {
2799 newbits3 = CLOCK_CTRL_44MHZ_CORE;
2802 tw32_wait_f(TG3PCI_CLOCK_CTRL,
2803 tp->pci_clock_ctrl | newbits3, 40);
2807 if (!(device_should_wake) &&
2808 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
2809 tg3_power_down_phy(tp, do_low_power);
2811 tg3_frob_aux_power(tp);
2813 /* Workaround for unstable PLL clock */
2814 if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
2815 (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
2816 u32 val = tr32(0x7d00);
2818 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
2819 tw32(0x7d00, val);
2820 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
2821 int err;
2823 err = tg3_nvram_lock(tp);
2824 tg3_halt_cpu(tp, RX_CPU_BASE);
2825 if (!err)
2826 tg3_nvram_unlock(tp);
2830 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
2832 return 0;
2833 }
2835 static void tg3_power_down(struct tg3 *tp)
2837 tg3_power_down_prepare(tp);
2839 pci_wake_from_d3(tp->pdev, tp->tg3_flags & TG3_FLAG_WOL_ENABLE);
2840 pci_set_power_state(tp->pdev, PCI_D3hot);
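/* tg3_aux_stat_to_speed_duplex() below decodes the speed/duplex field of
 * the Broadcom auxiliary status register into the generic SPEED_* and
 * DUPLEX_* constants; FET-class PHYs encode the result differently and
 * are handled in the default arm.
 */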
2843 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
2844 {
2845 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
2846 case MII_TG3_AUX_STAT_10HALF:
2847 *speed = SPEED_10;
2848 *duplex = DUPLEX_HALF;
2849 break;
2851 case MII_TG3_AUX_STAT_10FULL:
2852 *speed = SPEED_10;
2853 *duplex = DUPLEX_FULL;
2854 break;
2856 case MII_TG3_AUX_STAT_100HALF:
2857 *speed = SPEED_100;
2858 *duplex = DUPLEX_HALF;
2859 break;
2861 case MII_TG3_AUX_STAT_100FULL:
2862 *speed = SPEED_100;
2863 *duplex = DUPLEX_FULL;
2864 break;
2866 case MII_TG3_AUX_STAT_1000HALF:
2867 *speed = SPEED_1000;
2868 *duplex = DUPLEX_HALF;
2869 break;
2871 case MII_TG3_AUX_STAT_1000FULL:
2872 *speed = SPEED_1000;
2873 *duplex = DUPLEX_FULL;
2874 break;
2876 default:
2877 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2878 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
2879 SPEED_10;
2880 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
2881 DUPLEX_HALF;
2882 return;
2883 }
2884 *speed = SPEED_INVALID;
2885 *duplex = DUPLEX_INVALID;
2886 break;
2887 }
2888 }
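/* tg3_phy_copper_begin() below (re)programs MII_ADVERTISE and
 * MII_TG3_CTRL from tp->link_config and kicks off autonegotiation, or
 * forces a fixed speed/duplex when autoneg is disabled.
 */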
2890 static void tg3_phy_copper_begin(struct tg3 *tp)
2891 {
2892 u32 new_adv;
2893 int i;
2895 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2896 /* Entering low power mode. Disable gigabit and
2897 * 100baseT advertisements.
2899 tg3_writephy(tp, MII_TG3_CTRL, 0);
2901 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
2902 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
2903 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
2904 new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
2906 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2907 } else if (tp->link_config.speed == SPEED_INVALID) {
2908 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
2909 tp->link_config.advertising &=
2910 ~(ADVERTISED_1000baseT_Half |
2911 ADVERTISED_1000baseT_Full);
2913 new_adv = ADVERTISE_CSMA;
2914 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
2915 new_adv |= ADVERTISE_10HALF;
2916 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
2917 new_adv |= ADVERTISE_10FULL;
2918 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
2919 new_adv |= ADVERTISE_100HALF;
2920 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
2921 new_adv |= ADVERTISE_100FULL;
2923 new_adv |= tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2925 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2927 if (tp->link_config.advertising &
2928 (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
2929 new_adv = 0;
2930 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
2931 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
2932 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
2933 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
2934 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY) &&
2935 (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2936 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
2937 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2938 MII_TG3_CTRL_ENABLE_AS_MASTER);
2939 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
2941 tg3_writephy(tp, MII_TG3_CTRL, 0);
2944 new_adv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2945 new_adv |= ADVERTISE_CSMA;
2947 /* Asking for a specific link mode. */
2948 if (tp->link_config.speed == SPEED_1000) {
2949 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2951 if (tp->link_config.duplex == DUPLEX_FULL)
2952 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
2953 else
2954 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
2955 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2956 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
2957 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2958 MII_TG3_CTRL_ENABLE_AS_MASTER);
2959 } else {
2960 if (tp->link_config.speed == SPEED_100) {
2961 if (tp->link_config.duplex == DUPLEX_FULL)
2962 new_adv |= ADVERTISE_100FULL;
2963 else
2964 new_adv |= ADVERTISE_100HALF;
2965 } else {
2966 if (tp->link_config.duplex == DUPLEX_FULL)
2967 new_adv |= ADVERTISE_10FULL;
2968 else
2969 new_adv |= ADVERTISE_10HALF;
2970 }
2971 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2976 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
2979 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
2980 u32 val;
2982 tw32(TG3_CPMU_EEE_MODE,
2983 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2985 /* Enable SM_DSP clock and tx 6dB coding. */
2986 val = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
2987 MII_TG3_AUXCTL_ACTL_SMDSP_ENA |
2988 MII_TG3_AUXCTL_ACTL_TX_6DB;
2989 tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
2991 switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
2993 case ASIC_REV_57765:
2994 if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
2995 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
2996 MII_TG3_DSP_CH34TP2_HIBW01);
2999 val = MII_TG3_DSP_TAP26_ALNOKO |
3000 MII_TG3_DSP_TAP26_RMRXSTO |
3001 MII_TG3_DSP_TAP26_OPCSINPT;
3002 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
3003 }
3005 val = 0;
3006 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3007 /* Advertise 100-BaseTX EEE ability */
3008 if (tp->link_config.advertising &
3009 ADVERTISED_100baseT_Full)
3010 val |= MDIO_AN_EEE_ADV_100TX;
3011 /* Advertise 1000-BaseT EEE ability */
3012 if (tp->link_config.advertising &
3013 ADVERTISED_1000baseT_Full)
3014 val |= MDIO_AN_EEE_ADV_1000T;
3016 tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
3018 /* Turn off SM_DSP clock. */
3019 val = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
3020 MII_TG3_AUXCTL_ACTL_TX_6DB;
3021 tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
3024 if (tp->link_config.autoneg == AUTONEG_DISABLE &&
3025 tp->link_config.speed != SPEED_INVALID) {
3026 u32 bmcr, orig_bmcr;
3028 tp->link_config.active_speed = tp->link_config.speed;
3029 tp->link_config.active_duplex = tp->link_config.duplex;
3031 bmcr = 0;
3032 switch (tp->link_config.speed) {
3033 default:
3034 case SPEED_10:
3035 break;
3037 case SPEED_100:
3038 bmcr |= BMCR_SPEED100;
3039 break;
3041 case SPEED_1000:
3042 bmcr |= TG3_BMCR_SPEED1000;
3043 break;
3044 }
3046 if (tp->link_config.duplex == DUPLEX_FULL)
3047 bmcr |= BMCR_FULLDPLX;
3049 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
3050 (bmcr != orig_bmcr)) {
3051 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
3052 for (i = 0; i < 1500; i++) {
3053 u32 tmp;
3055 udelay(10);
3056 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
3057 tg3_readphy(tp, MII_BMSR, &tmp))
3058 continue;
3059 if (!(tmp & BMSR_LSTATUS)) {
3060 udelay(40);
3061 break;
3062 }
3063 }
3064 tg3_writephy(tp, MII_BMCR, bmcr);
3065 udelay(40);
3066 }
3067 } else {
3068 tg3_writephy(tp, MII_BMCR,
3069 BMCR_ANENABLE | BMCR_ANRESTART);
3070 }
3071 }
3073 static int tg3_init_5401phy_dsp(struct tg3 *tp)
3074 {
3075 int err;
3077 /* Turn off tap power management. */
3078 /* Set Extended packet length bit */
3079 err = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
3081 err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
3082 err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
3083 err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
3084 err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
3085 err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
3087 udelay(40);
3089 return err;
3090 }
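/* tg3_copper_is_advertising_all() below checks that everything in 'mask'
 * is already present in the PHY's advertisement registers, so the caller
 * can tell whether a running autonegotiation still matches the requested
 * configuration.
 */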
3092 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
3094 u32 adv_reg, all_mask = 0;
3096 if (mask & ADVERTISED_10baseT_Half)
3097 all_mask |= ADVERTISE_10HALF;
3098 if (mask & ADVERTISED_10baseT_Full)
3099 all_mask |= ADVERTISE_10FULL;
3100 if (mask & ADVERTISED_100baseT_Half)
3101 all_mask |= ADVERTISE_100HALF;
3102 if (mask & ADVERTISED_100baseT_Full)
3103 all_mask |= ADVERTISE_100FULL;
3105 if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
3106 return 0;
3108 if ((adv_reg & all_mask) != all_mask)
3109 return 0;
3110 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3111 u32 tg3_ctrl;
3113 all_mask = 0;
3114 if (mask & ADVERTISED_1000baseT_Half)
3115 all_mask |= ADVERTISE_1000HALF;
3116 if (mask & ADVERTISED_1000baseT_Full)
3117 all_mask |= ADVERTISE_1000FULL;
3119 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
3120 return 0;
3122 if ((tg3_ctrl & all_mask) != all_mask)
3123 return 0;
3124 }
3126 return 1;
3127 }
3128 static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
3129 {
3130 u32 curadv, reqadv;
3132 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
3133 return 0;
3135 curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
3136 reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
3138 if (tp->link_config.active_duplex == DUPLEX_FULL) {
3139 if (curadv != reqadv)
3140 return 0;
3142 if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)
3143 tg3_readphy(tp, MII_LPA, rmtadv);
3145 /* Reprogram the advertisement register, even if it
3146 * does not affect the current link. If the link
3147 * gets renegotiated in the future, we can save an
3148 * additional renegotiation cycle by advertising
3149 * it correctly in the first place.
3151 if (curadv != reqadv) {
3152 *lcladv &= ~(ADVERTISE_PAUSE_CAP |
3153 ADVERTISE_PAUSE_ASYM);
3154 tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
3155 }
3156 }
3158 return 1;
3159 }
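/* tg3_setup_copper_phy() below is the main copper link-state handler: it
 * clears stale MAC status bits, applies per-chip PHY workarounds, polls
 * BMSR/BMCR and the aux status register, and then programs the MAC's
 * port mode, duplex and flow control to match the negotiated link.
 */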
3161 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
3162 {
3163 int current_link_up;
3164 u32 bmsr, val;
3165 u32 lcl_adv, rmt_adv;
3166 u16 current_speed;
3167 u8 current_duplex;
3168 int i, err;
3170 tw32(MAC_EVENT, 0);
3172 tw32_f(MAC_STATUS,
3173 (MAC_STATUS_SYNC_CHANGED |
3174 MAC_STATUS_CFG_CHANGED |
3175 MAC_STATUS_MI_COMPLETION |
3176 MAC_STATUS_LNKSTATE_CHANGED));
3179 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
3180 tw32_f(MAC_MI_MODE,
3181 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
3182 udelay(80);
3183 }
3185 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
3187 /* Some third-party PHYs need to be reset on link going
3188 * down.
3189 */
3190 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3191 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
3192 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
3193 netif_carrier_ok(tp->dev)) {
3194 tg3_readphy(tp, MII_BMSR, &bmsr);
3195 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3196 !(bmsr & BMSR_LSTATUS))
3197 force_reset = 1;
3198 }
3199 if (force_reset)
3200 tg3_phy_reset(tp);
3202 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
3203 tg3_readphy(tp, MII_BMSR, &bmsr);
3204 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
3205 !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
3206 bmsr = 0;
3208 if (!(bmsr & BMSR_LSTATUS)) {
3209 err = tg3_init_5401phy_dsp(tp);
3210 if (err)
3211 return err;
3213 tg3_readphy(tp, MII_BMSR, &bmsr);
3214 for (i = 0; i < 1000; i++) {
3216 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3217 (bmsr & BMSR_LSTATUS)) {
3218 udelay(40);
3219 break;
3220 }
3221 }
3223 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
3224 TG3_PHY_REV_BCM5401_B0 &&
3225 !(bmsr & BMSR_LSTATUS) &&
3226 tp->link_config.active_speed == SPEED_1000) {
3227 err = tg3_phy_reset(tp);
3228 if (!err)
3229 err = tg3_init_5401phy_dsp(tp);
3230 if (err)
3231 return err;
3232 }
3234 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3235 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
3236 /* 5701 {A0,B0} CRC bug workaround */
3237 tg3_writephy(tp, 0x15, 0x0a75);
3238 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3239 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
3240 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3243 /* Clear pending interrupts... */
3244 tg3_readphy(tp, MII_TG3_ISTAT, &val);
3245 tg3_readphy(tp, MII_TG3_ISTAT, &val);
3247 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
3248 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
3249 else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
3250 tg3_writephy(tp, MII_TG3_IMASK, ~0);
3252 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3253 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3254 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
3255 tg3_writephy(tp, MII_TG3_EXT_CTRL,
3256 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
3258 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
3261 current_link_up = 0;
3262 current_speed = SPEED_INVALID;
3263 current_duplex = DUPLEX_INVALID;
3265 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
3266 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
3267 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
3268 if (!(val & (1 << 10))) {
3269 val |= (1 << 10);
3270 tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
3271 goto relink;
3272 }
3273 }
3276 for (i = 0; i < 100; i++) {
3277 tg3_readphy(tp, MII_BMSR, &bmsr);
3278 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3279 (bmsr & BMSR_LSTATUS))
3280 break;
3281 udelay(40);
3282 }
3284 if (bmsr & BMSR_LSTATUS) {
3285 u32 aux_stat, bmcr;
3287 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
3288 for (i = 0; i < 2000; i++) {
3289 udelay(10);
3290 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
3291 aux_stat)
3292 break;
3293 }
3295 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
3296 &current_speed,
3297 &current_duplex);
3299 bmcr = 0;
3300 for (i = 0; i < 200; i++) {
3301 tg3_readphy(tp, MII_BMCR, &bmcr);
3302 if (tg3_readphy(tp, MII_BMCR, &bmcr))
3303 continue;
3304 if (bmcr && bmcr != 0x7fff)
3305 break;
3306 udelay(10);
3307 }
3312 tp->link_config.active_speed = current_speed;
3313 tp->link_config.active_duplex = current_duplex;
3315 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3316 if ((bmcr & BMCR_ANENABLE) &&
3317 tg3_copper_is_advertising_all(tp,
3318 tp->link_config.advertising)) {
3319 if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
3320 &rmt_adv))
3321 current_link_up = 1;
3324 if (!(bmcr & BMCR_ANENABLE) &&
3325 tp->link_config.speed == current_speed &&
3326 tp->link_config.duplex == current_duplex &&
3327 tp->link_config.flowctrl ==
3328 tp->link_config.active_flowctrl) {
3329 current_link_up = 1;
3333 if (current_link_up == 1 &&
3334 tp->link_config.active_duplex == DUPLEX_FULL)
3335 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
3336 }
3338 relink:
3339 if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3340 tg3_phy_copper_begin(tp);
3342 tg3_readphy(tp, MII_BMSR, &bmsr);
3343 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3344 (bmsr & BMSR_LSTATUS))
3345 current_link_up = 1;
3348 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
3349 if (current_link_up == 1) {
3350 if (tp->link_config.active_speed == SPEED_100 ||
3351 tp->link_config.active_speed == SPEED_10)
3352 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3354 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3355 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
3356 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3357 else
3358 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3360 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
3361 if (tp->link_config.active_duplex == DUPLEX_HALF)
3362 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
3364 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
3365 if (current_link_up == 1 &&
3366 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
3367 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
3369 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
3372 /* ??? Without this setting Netgear GA302T PHY does not
3373 * ??? send/receive packets...
3374 */
3375 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
3376 tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
3377 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
3378 tw32_f(MAC_MI_MODE, tp->mi_mode);
3382 tw32_f(MAC_MODE, tp->mac_mode);
3385 tg3_phy_eee_adjust(tp, current_link_up);
3387 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
3388 /* Polled via timer. */
3389 tw32_f(MAC_EVENT, 0);
3390 } else
3391 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3395 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
3396 current_link_up == 1 &&
3397 tp->link_config.active_speed == SPEED_1000 &&
3398 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
3399 (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
3400 udelay(120);
3401 tw32_f(MAC_STATUS,
3402 (MAC_STATUS_SYNC_CHANGED |
3403 MAC_STATUS_CFG_CHANGED));
3404 udelay(40);
3405 tg3_write_mem(tp,
3406 NIC_SRAM_FIRMWARE_MBOX,
3407 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
3410 /* Prevent send BD corruption. */
3411 if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG) {
3412 u16 oldlnkctl, newlnkctl;
3414 pci_read_config_word(tp->pdev,
3415 tp->pcie_cap + PCI_EXP_LNKCTL,
3416 &oldlnkctl);
3417 if (tp->link_config.active_speed == SPEED_100 ||
3418 tp->link_config.active_speed == SPEED_10)
3419 newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
3421 newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
3422 if (newlnkctl != oldlnkctl)
3423 pci_write_config_word(tp->pdev,
3424 tp->pcie_cap + PCI_EXP_LNKCTL,
3425 newlnkctl);
3426 }
3428 if (current_link_up != netif_carrier_ok(tp->dev)) {
3429 if (current_link_up)
3430 netif_carrier_on(tp->dev);
3432 netif_carrier_off(tp->dev);
3433 tg3_link_report(tp);
3434 }
3436 return 0;
3437 }
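/* The tg3_fiber_aneginfo structure and state machine below implement
 * IEEE 802.3 clause 37 style autonegotiation in software for fiber
 * boards whose MACs cannot do it in hardware; the ANEG_STATE_* values
 * track the arbitration states from the standard.
 */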
3439 struct tg3_fiber_aneginfo {
3440 int state;
3441 #define ANEG_STATE_UNKNOWN 0
3442 #define ANEG_STATE_AN_ENABLE 1
3443 #define ANEG_STATE_RESTART_INIT 2
3444 #define ANEG_STATE_RESTART 3
3445 #define ANEG_STATE_DISABLE_LINK_OK 4
3446 #define ANEG_STATE_ABILITY_DETECT_INIT 5
3447 #define ANEG_STATE_ABILITY_DETECT 6
3448 #define ANEG_STATE_ACK_DETECT_INIT 7
3449 #define ANEG_STATE_ACK_DETECT 8
3450 #define ANEG_STATE_COMPLETE_ACK_INIT 9
3451 #define ANEG_STATE_COMPLETE_ACK 10
3452 #define ANEG_STATE_IDLE_DETECT_INIT 11
3453 #define ANEG_STATE_IDLE_DETECT 12
3454 #define ANEG_STATE_LINK_OK 13
3455 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
3456 #define ANEG_STATE_NEXT_PAGE_WAIT 15
3458 u32 flags;
3459 #define MR_AN_ENABLE 0x00000001
3460 #define MR_RESTART_AN 0x00000002
3461 #define MR_AN_COMPLETE 0x00000004
3462 #define MR_PAGE_RX 0x00000008
3463 #define MR_NP_LOADED 0x00000010
3464 #define MR_TOGGLE_TX 0x00000020
3465 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
3466 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
3467 #define MR_LP_ADV_SYM_PAUSE 0x00000100
3468 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
3469 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
3470 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
3471 #define MR_LP_ADV_NEXT_PAGE 0x00001000
3472 #define MR_TOGGLE_RX 0x00002000
3473 #define MR_NP_RX 0x00004000
3475 #define MR_LINK_OK 0x80000000
3477 unsigned long link_time, cur_time;
3479 u32 ability_match_cfg;
3480 int ability_match_count;
3482 char ability_match, idle_match, ack_match;
3484 u32 txconfig, rxconfig;
3485 #define ANEG_CFG_NP 0x00000080
3486 #define ANEG_CFG_ACK 0x00000040
3487 #define ANEG_CFG_RF2 0x00000020
3488 #define ANEG_CFG_RF1 0x00000010
3489 #define ANEG_CFG_PS2 0x00000001
3490 #define ANEG_CFG_PS1 0x00008000
3491 #define ANEG_CFG_HD 0x00004000
3492 #define ANEG_CFG_FD 0x00002000
3493 #define ANEG_CFG_INVAL 0x00001f06
3494 };
3496 #define ANEG_OK 0
3497 #define ANEG_DONE 1
3498 #define ANEG_TIMER_ENAB 2
3499 #define ANEG_FAILED -1
3501 #define ANEG_STATE_SETTLE_TIME 10000
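/* tg3_fiber_aneg_smachine() below advances the software autonegotiation
 * state machine by one tick: it samples the received config word from
 * MAC_RX_AUTO_NEG, runs the ability/ack matching rules, and returns
 * ANEG_OK, ANEG_DONE, ANEG_TIMER_ENAB or ANEG_FAILED to the caller.
 */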
3503 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
3504 struct tg3_fiber_aneginfo *ap)
3507 unsigned long delta;
3508 u32 rx_cfg_reg;
3509 int ret;
3511 if (ap->state == ANEG_STATE_UNKNOWN) {
3515 ap->ability_match_cfg = 0;
3516 ap->ability_match_count = 0;
3517 ap->ability_match = 0;
3518 ap->idle_match = 0;
3519 ap->ack_match = 0;
3520 }
3522 ap->cur_time++;
3523 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
3524 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
3526 if (rx_cfg_reg != ap->ability_match_cfg) {
3527 ap->ability_match_cfg = rx_cfg_reg;
3528 ap->ability_match = 0;
3529 ap->ability_match_count = 0;
3531 if (++ap->ability_match_count > 1) {
3532 ap->ability_match = 1;
3533 ap->ability_match_cfg = rx_cfg_reg;
3536 if (rx_cfg_reg & ANEG_CFG_ACK)
3537 ap->ack_match = 1;
3538 else
3539 ap->ack_match = 0;
3541 ap->idle_match = 0;
3542 } else {
3543 ap->idle_match = 1;
3544 ap->ability_match_cfg = 0;
3545 ap->ability_match_count = 0;
3546 ap->ability_match = 0;
3547 ap->ack_match = 0;
3549 rx_cfg_reg = 0;
3550 }
3552 ap->rxconfig = rx_cfg_reg;
3553 ret = ANEG_OK;
3555 switch (ap->state) {
3556 case ANEG_STATE_UNKNOWN:
3557 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
3558 ap->state = ANEG_STATE_AN_ENABLE;
3561 case ANEG_STATE_AN_ENABLE:
3562 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
3563 if (ap->flags & MR_AN_ENABLE) {
3566 ap->ability_match_cfg = 0;
3567 ap->ability_match_count = 0;
3568 ap->ability_match = 0;
3572 ap->state = ANEG_STATE_RESTART_INIT;
3573 } else {
3574 ap->state = ANEG_STATE_DISABLE_LINK_OK;
3575 }
3576 break;
3578 case ANEG_STATE_RESTART_INIT:
3579 ap->link_time = ap->cur_time;
3580 ap->flags &= ~(MR_NP_LOADED);
3582 tw32(MAC_TX_AUTO_NEG, 0);
3583 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3584 tw32_f(MAC_MODE, tp->mac_mode);
3587 ret = ANEG_TIMER_ENAB;
3588 ap->state = ANEG_STATE_RESTART;
3591 case ANEG_STATE_RESTART:
3592 delta = ap->cur_time - ap->link_time;
3593 if (delta > ANEG_STATE_SETTLE_TIME)
3594 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
3596 ret = ANEG_TIMER_ENAB;
3599 case ANEG_STATE_DISABLE_LINK_OK:
3603 case ANEG_STATE_ABILITY_DETECT_INIT:
3604 ap->flags &= ~(MR_TOGGLE_TX);
3605 ap->txconfig = ANEG_CFG_FD;
3606 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3607 if (flowctrl & ADVERTISE_1000XPAUSE)
3608 ap->txconfig |= ANEG_CFG_PS1;
3609 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3610 ap->txconfig |= ANEG_CFG_PS2;
3611 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3612 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3613 tw32_f(MAC_MODE, tp->mac_mode);
3616 ap->state = ANEG_STATE_ABILITY_DETECT;
3619 case ANEG_STATE_ABILITY_DETECT:
3620 if (ap->ability_match != 0 && ap->rxconfig != 0)
3621 ap->state = ANEG_STATE_ACK_DETECT_INIT;
3624 case ANEG_STATE_ACK_DETECT_INIT:
3625 ap->txconfig |= ANEG_CFG_ACK;
3626 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3627 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3628 tw32_f(MAC_MODE, tp->mac_mode);
3631 ap->state = ANEG_STATE_ACK_DETECT;
3634 case ANEG_STATE_ACK_DETECT:
3635 if (ap->ack_match != 0) {
3636 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
3637 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
3638 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
3640 ap->state = ANEG_STATE_AN_ENABLE;
3642 } else if (ap->ability_match != 0 &&
3643 ap->rxconfig == 0) {
3644 ap->state = ANEG_STATE_AN_ENABLE;
3648 case ANEG_STATE_COMPLETE_ACK_INIT:
3649 if (ap->rxconfig & ANEG_CFG_INVAL) {
3653 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
3654 MR_LP_ADV_HALF_DUPLEX |
3655 MR_LP_ADV_SYM_PAUSE |
3656 MR_LP_ADV_ASYM_PAUSE |
3657 MR_LP_ADV_REMOTE_FAULT1 |
3658 MR_LP_ADV_REMOTE_FAULT2 |
3659 MR_LP_ADV_NEXT_PAGE |
3660 MR_TOGGLE_RX |
3661 MR_NP_RX);
3662 if (ap->rxconfig & ANEG_CFG_FD)
3663 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
3664 if (ap->rxconfig & ANEG_CFG_HD)
3665 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
3666 if (ap->rxconfig & ANEG_CFG_PS1)
3667 ap->flags |= MR_LP_ADV_SYM_PAUSE;
3668 if (ap->rxconfig & ANEG_CFG_PS2)
3669 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
3670 if (ap->rxconfig & ANEG_CFG_RF1)
3671 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
3672 if (ap->rxconfig & ANEG_CFG_RF2)
3673 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
3674 if (ap->rxconfig & ANEG_CFG_NP)
3675 ap->flags |= MR_LP_ADV_NEXT_PAGE;
3677 ap->link_time = ap->cur_time;
3679 ap->flags ^= (MR_TOGGLE_TX);
3680 if (ap->rxconfig & 0x0008)
3681 ap->flags |= MR_TOGGLE_RX;
3682 if (ap->rxconfig & ANEG_CFG_NP)
3683 ap->flags |= MR_NP_RX;
3684 ap->flags |= MR_PAGE_RX;
3686 ap->state = ANEG_STATE_COMPLETE_ACK;
3687 ret = ANEG_TIMER_ENAB;
3690 case ANEG_STATE_COMPLETE_ACK:
3691 if (ap->ability_match != 0 &&
3692 ap->rxconfig == 0) {
3693 ap->state = ANEG_STATE_AN_ENABLE;
3696 delta = ap->cur_time - ap->link_time;
3697 if (delta > ANEG_STATE_SETTLE_TIME) {
3698 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
3699 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3701 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
3702 !(ap->flags & MR_NP_RX)) {
3703 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3711 case ANEG_STATE_IDLE_DETECT_INIT:
3712 ap->link_time = ap->cur_time;
3713 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3714 tw32_f(MAC_MODE, tp->mac_mode);
3717 ap->state = ANEG_STATE_IDLE_DETECT;
3718 ret = ANEG_TIMER_ENAB;
3721 case ANEG_STATE_IDLE_DETECT:
3722 if (ap->ability_match != 0 &&
3723 ap->rxconfig == 0) {
3724 ap->state = ANEG_STATE_AN_ENABLE;
3727 delta = ap->cur_time - ap->link_time;
3728 if (delta > ANEG_STATE_SETTLE_TIME) {
3729 /* XXX another gem from the Broadcom driver :( */
3730 ap->state = ANEG_STATE_LINK_OK;
3734 case ANEG_STATE_LINK_OK:
3735 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
3739 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
3740 /* ??? unimplemented */
3743 case ANEG_STATE_NEXT_PAGE_WAIT:
3744 /* ??? unimplemented */
3745 break;
3747 default:
3748 ret = ANEG_FAILED;
3749 break;
3750 }
3752 return ret;
3753 }
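/* fiber_autoneg() below drives the state machine to completion: it
 * busy-polls tg3_fiber_aneg_smachine() for up to 195000 iterations and
 * reports the negotiated tx/rx config words back to the caller.
 */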
3755 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
3756 {
3757 int res = 0;
3758 struct tg3_fiber_aneginfo aninfo;
3759 int status = ANEG_FAILED;
3760 unsigned int tick;
3761 u32 tmp;
3763 tw32_f(MAC_TX_AUTO_NEG, 0);
3765 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
3766 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
3769 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
3772 memset(&aninfo, 0, sizeof(aninfo));
3773 aninfo.flags |= MR_AN_ENABLE;
3774 aninfo.state = ANEG_STATE_UNKNOWN;
3775 aninfo.cur_time = 0;
3776 tick = 0;
3777 while (++tick < 195000) {
3778 status = tg3_fiber_aneg_smachine(tp, &aninfo);
3779 if (status == ANEG_DONE || status == ANEG_FAILED)
3780 break;
3782 udelay(1);
3783 }
3785 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3786 tw32_f(MAC_MODE, tp->mac_mode);
3789 *txflags = aninfo.txconfig;
3790 *rxflags = aninfo.flags;
3792 if (status == ANEG_DONE &&
3793 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
3794 MR_LP_ADV_FULL_DUPLEX)))
3795 res = 1;
3797 return res;
3798 }
3800 static void tg3_init_bcm8002(struct tg3 *tp)
3802 u32 mac_status = tr32(MAC_STATUS);
3805 /* Reset when initializing for the first time or when we have a link. */
3806 if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
3807 !(mac_status & MAC_STATUS_PCS_SYNCED))
3808 return;
3810 /* Set PLL lock range. */
3811 tg3_writephy(tp, 0x16, 0x8007);
3814 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
3816 /* Wait for reset to complete. */
3817 /* XXX schedule_timeout() ... */
3818 for (i = 0; i < 500; i++)
3819 udelay(10);
3821 /* Config mode; select PMA/Ch 1 regs. */
3822 tg3_writephy(tp, 0x10, 0x8411);
3824 /* Enable auto-lock and comdet, select txclk for tx. */
3825 tg3_writephy(tp, 0x11, 0x0a10);
3827 tg3_writephy(tp, 0x18, 0x00a0);
3828 tg3_writephy(tp, 0x16, 0x41ff);
3830 /* Assert and deassert POR. */
3831 tg3_writephy(tp, 0x13, 0x0400);
3833 tg3_writephy(tp, 0x13, 0x0000);
3835 tg3_writephy(tp, 0x11, 0x0a50);
3837 tg3_writephy(tp, 0x11, 0x0a10);
3839 /* Wait for signal to stabilize */
3840 /* XXX schedule_timeout() ... */
3841 for (i = 0; i < 15000; i++)
3842 udelay(10);
3844 /* Deselect the channel register so we can read the PHYID
3845 * later.
3846 */
3847 tg3_writephy(tp, 0x10, 0x8011);
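/* tg3_setup_fiber_hw_autoneg() below handles boards whose SERDES block
 * (SG_DIG) can autonegotiate in hardware: it programs the expected
 * SG_DIG_CTRL value, optionally applies the 5704 serdes workaround, and
 * falls back to parallel detection when no config words are received.
 */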
3850 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
3853 u32 sg_dig_ctrl, sg_dig_status;
3854 u32 serdes_cfg, expected_sg_dig_ctrl;
3855 int workaround, port_a;
3856 int current_link_up;
3858 serdes_cfg = 0;
3859 expected_sg_dig_ctrl = 0;
3860 workaround = 0;
3861 port_a = 1;
3862 current_link_up = 0;
3864 if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
3865 tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
3866 workaround = 1;
3867 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
3868 port_a = 0;
3870 /* preserve bits 0-11,13,14 for signal pre-emphasis */
3871 /* preserve bits 20-23 for voltage regulator */
3872 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
3875 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3877 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
3878 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
3880 u32 val = serdes_cfg;
3882 if (port_a)
3883 val |= 0xc010000;
3884 else
3885 val |= 0x4010000;
3886 tw32_f(MAC_SERDES_CFG, val);
3889 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
3891 if (mac_status & MAC_STATUS_PCS_SYNCED) {
3892 tg3_setup_flow_control(tp, 0, 0);
3893 current_link_up = 1;
3894 }
3895 goto out;
3896 }
3898 /* Want auto-negotiation. */
3899 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
3901 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3902 if (flowctrl & ADVERTISE_1000XPAUSE)
3903 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
3904 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3905 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
3907 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
3908 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
3909 tp->serdes_counter &&
3910 ((mac_status & (MAC_STATUS_PCS_SYNCED |
3911 MAC_STATUS_RCVD_CFG)) ==
3912 MAC_STATUS_PCS_SYNCED)) {
3913 tp->serdes_counter--;
3914 current_link_up = 1;
3915 goto out;
3916 }
3917 restart_autoneg:
3918 if (workaround)
3919 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
3920 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
3922 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
3924 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3925 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
3926 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
3927 MAC_STATUS_SIGNAL_DET)) {
3928 sg_dig_status = tr32(SG_DIG_STATUS);
3929 mac_status = tr32(MAC_STATUS);
3931 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
3932 (mac_status & MAC_STATUS_PCS_SYNCED)) {
3933 u32 local_adv = 0, remote_adv = 0;
3935 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
3936 local_adv |= ADVERTISE_1000XPAUSE;
3937 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
3938 local_adv |= ADVERTISE_1000XPSE_ASYM;
3940 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
3941 remote_adv |= LPA_1000XPAUSE;
3942 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
3943 remote_adv |= LPA_1000XPAUSE_ASYM;
3945 tg3_setup_flow_control(tp, local_adv, remote_adv);
3946 current_link_up = 1;
3947 tp->serdes_counter = 0;
3948 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
3949 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
3950 if (tp->serdes_counter)
3951 tp->serdes_counter--;
3952 else {
3953 if (workaround) {
3954 u32 val = serdes_cfg;
3956 if (port_a)
3957 val |= 0xc010000;
3958 else
3959 val |= 0x4010000;
3961 tw32_f(MAC_SERDES_CFG, val);
3962 }
3964 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
3967 /* Link parallel detection - link is up
3968 * only if we have PCS_SYNC and not
3969 * receiving config code words. */
3970 mac_status = tr32(MAC_STATUS);
3971 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
3972 !(mac_status & MAC_STATUS_RCVD_CFG)) {
3973 tg3_setup_flow_control(tp, 0, 0);
3974 current_link_up = 1;
3975 tp->phy_flags |=
3976 TG3_PHYFLG_PARALLEL_DETECT;
3977 tp->serdes_counter =
3978 SERDES_PARALLEL_DET_TIMEOUT;
3979 } else
3980 goto restart_autoneg;
3984 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3985 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
3986 }
3988 out:
3989 return current_link_up;
3992 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
3993 {
3994 int current_link_up = 0;
3996 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
3997 goto out;
3999 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4000 u32 txflags, rxflags;
4001 int i;
4003 if (fiber_autoneg(tp, &txflags, &rxflags)) {
4004 u32 local_adv = 0, remote_adv = 0;
4006 if (txflags & ANEG_CFG_PS1)
4007 local_adv |= ADVERTISE_1000XPAUSE;
4008 if (txflags & ANEG_CFG_PS2)
4009 local_adv |= ADVERTISE_1000XPSE_ASYM;
4011 if (rxflags & MR_LP_ADV_SYM_PAUSE)
4012 remote_adv |= LPA_1000XPAUSE;
4013 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
4014 remote_adv |= LPA_1000XPAUSE_ASYM;
4016 tg3_setup_flow_control(tp, local_adv, remote_adv);
4018 current_link_up = 1;
4020 for (i = 0; i < 30; i++) {
4021 udelay(20);
4022 tw32_f(MAC_STATUS,
4023 (MAC_STATUS_SYNC_CHANGED |
4024 MAC_STATUS_CFG_CHANGED));
4026 if ((tr32(MAC_STATUS) &
4027 (MAC_STATUS_SYNC_CHANGED |
4028 MAC_STATUS_CFG_CHANGED)) == 0)
4029 break;
4030 }
4032 mac_status = tr32(MAC_STATUS);
4033 if (current_link_up == 0 &&
4034 (mac_status & MAC_STATUS_PCS_SYNCED) &&
4035 !(mac_status & MAC_STATUS_RCVD_CFG))
4036 current_link_up = 1;
4037 } else {
4038 tg3_setup_flow_control(tp, 0, 0);
4040 /* Forcing 1000FD link up. */
4041 current_link_up = 1;
4043 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
4046 tw32_f(MAC_MODE, tp->mac_mode);
4047 udelay(40);
4049 out:
4051 return current_link_up;
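/* tg3_setup_fiber_phy() below is the top-level fiber (TBI) link handler:
 * it forces the MAC into TBI mode, runs either the hardware SG_DIG
 * autoneg path or the software state machine, and updates the link LEDs
 * and carrier state accordingly.
 */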
4054 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
4055 {
4056 u32 orig_pause_cfg;
4057 u16 orig_active_speed;
4058 u8 orig_active_duplex;
4059 u32 mac_status;
4060 int current_link_up;
4061 int i;
4063 orig_pause_cfg = tp->link_config.active_flowctrl;
4064 orig_active_speed = tp->link_config.active_speed;
4065 orig_active_duplex = tp->link_config.active_duplex;
4067 if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
4068 netif_carrier_ok(tp->dev) &&
4069 (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
4070 mac_status = tr32(MAC_STATUS);
4071 mac_status &= (MAC_STATUS_PCS_SYNCED |
4072 MAC_STATUS_SIGNAL_DET |
4073 MAC_STATUS_CFG_CHANGED |
4074 MAC_STATUS_RCVD_CFG);
4075 if (mac_status == (MAC_STATUS_PCS_SYNCED |
4076 MAC_STATUS_SIGNAL_DET)) {
4077 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4078 MAC_STATUS_CFG_CHANGED));
4079 return 0;
4080 }
4081 }
4083 tw32_f(MAC_TX_AUTO_NEG, 0);
4085 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
4086 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
4087 tw32_f(MAC_MODE, tp->mac_mode);
4090 if (tp->phy_id == TG3_PHY_ID_BCM8002)
4091 tg3_init_bcm8002(tp);
4093 /* Enable link change event even when serdes polling. */
4094 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4097 current_link_up = 0;
4098 mac_status = tr32(MAC_STATUS);
4100 if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
4101 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
4103 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
4105 tp->napi[0].hw_status->status =
4106 (SD_STATUS_UPDATED |
4107 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
4109 for (i = 0; i < 100; i++) {
4110 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4111 MAC_STATUS_CFG_CHANGED));
4113 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
4114 MAC_STATUS_CFG_CHANGED |
4115 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
4116 break;
4117 }
4119 mac_status = tr32(MAC_STATUS);
4120 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
4121 current_link_up = 0;
4122 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
4123 tp->serdes_counter == 0) {
4124 tw32_f(MAC_MODE, (tp->mac_mode |
4125 MAC_MODE_SEND_CONFIGS));
4127 tw32_f(MAC_MODE, tp->mac_mode);
4131 if (current_link_up == 1) {
4132 tp->link_config.active_speed = SPEED_1000;
4133 tp->link_config.active_duplex = DUPLEX_FULL;
4134 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4135 LED_CTRL_LNKLED_OVERRIDE |
4136 LED_CTRL_1000MBPS_ON));
4138 tp->link_config.active_speed = SPEED_INVALID;
4139 tp->link_config.active_duplex = DUPLEX_INVALID;
4140 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4141 LED_CTRL_LNKLED_OVERRIDE |
4142 LED_CTRL_TRAFFIC_OVERRIDE));
4145 if (current_link_up != netif_carrier_ok(tp->dev)) {
4146 if (current_link_up)
4147 netif_carrier_on(tp->dev);
4149 netif_carrier_off(tp->dev);
4150 tg3_link_report(tp);
4152 u32 now_pause_cfg = tp->link_config.active_flowctrl;
4153 if (orig_pause_cfg != now_pause_cfg ||
4154 orig_active_speed != tp->link_config.active_speed ||
4155 orig_active_duplex != tp->link_config.active_duplex)
4156 tg3_link_report(tp);
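/* tg3_setup_fiber_mii_phy() below serves 5714/5780-class boards whose
 * SERDES is reached through an MII-style register set: the usual
 * BMCR/BMSR/ADVERTISE dance, plus a parallel-detect fallback for link
 * partners that do not autonegotiate.
 */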
4162 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
4163 {
4164 int current_link_up, err = 0;
4165 u32 bmsr, bmcr;
4166 u16 current_speed;
4167 u8 current_duplex;
4168 u32 local_adv, remote_adv;
4170 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4171 tw32_f(MAC_MODE, tp->mac_mode);
4172 udelay(40);
4174 tw32_f(MAC_STATUS,
4177 (MAC_STATUS_SYNC_CHANGED |
4178 MAC_STATUS_CFG_CHANGED |
4179 MAC_STATUS_MI_COMPLETION |
4180 MAC_STATUS_LNKSTATE_CHANGED));
4181 udelay(40);
4183 if (force_reset)
4184 tg3_phy_reset(tp);
4186 current_link_up = 0;
4187 current_speed = SPEED_INVALID;
4188 current_duplex = DUPLEX_INVALID;
4190 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4191 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4192 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
4193 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4194 bmsr |= BMSR_LSTATUS;
4196 bmsr &= ~BMSR_LSTATUS;
4199 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
4201 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
4202 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
4203 /* do nothing, just check for link up at the end */
4204 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4207 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4208 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
4209 ADVERTISE_1000XPAUSE |
4210 ADVERTISE_1000XPSE_ASYM |
4211 ADVERTISE_SLCT);
4213 new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4215 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
4216 new_adv |= ADVERTISE_1000XHALF;
4217 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
4218 new_adv |= ADVERTISE_1000XFULL;
4220 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
4221 tg3_writephy(tp, MII_ADVERTISE, new_adv);
4222 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
4223 tg3_writephy(tp, MII_BMCR, bmcr);
4225 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4226 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
4227 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4228 return err;
4229 }
4230 } else {
4231 u32 new_bmcr;
4234 bmcr &= ~BMCR_SPEED1000;
4235 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
4237 if (tp->link_config.duplex == DUPLEX_FULL)
4238 new_bmcr |= BMCR_FULLDPLX;
4240 if (new_bmcr != bmcr) {
4241 /* BMCR_SPEED1000 is a reserved bit that needs
4242 * to be set on write.
4244 new_bmcr |= BMCR_SPEED1000;
4246 /* Force a linkdown */
4247 if (netif_carrier_ok(tp->dev)) {
4248 u32 adv;
4250 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4251 adv &= ~(ADVERTISE_1000XFULL |
4252 ADVERTISE_1000XHALF |
4253 ADVERTISE_SLCT);
4254 tg3_writephy(tp, MII_ADVERTISE, adv);
4255 tg3_writephy(tp, MII_BMCR, bmcr |
4256 BMCR_ANRESTART |
4257 BMCR_ANENABLE);
4258 udelay(10);
4259 netif_carrier_off(tp->dev);
4261 tg3_writephy(tp, MII_BMCR, new_bmcr);
4263 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4264 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4265 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
4267 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4268 bmsr |= BMSR_LSTATUS;
4270 bmsr &= ~BMSR_LSTATUS;
4272 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4276 if (bmsr & BMSR_LSTATUS) {
4277 current_speed = SPEED_1000;
4278 current_link_up = 1;
4279 if (bmcr & BMCR_FULLDPLX)
4280 current_duplex = DUPLEX_FULL;
4281 else
4282 current_duplex = DUPLEX_HALF;
4284 local_adv = 0;
4285 remote_adv = 0;
4287 if (bmcr & BMCR_ANENABLE) {
4288 u32 common;
4290 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
4291 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
4292 common = local_adv & remote_adv;
4293 if (common & (ADVERTISE_1000XHALF |
4294 ADVERTISE_1000XFULL)) {
4295 if (common & ADVERTISE_1000XFULL)
4296 current_duplex = DUPLEX_FULL;
4298 current_duplex = DUPLEX_HALF;
4299 } else if (!(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
4300 /* Link is up via parallel detect */
4301 } else {
4302 current_link_up = 0;
4307 if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
4308 tg3_setup_flow_control(tp, local_adv, remote_adv);
4310 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4311 if (tp->link_config.active_duplex == DUPLEX_HALF)
4312 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4314 tw32_f(MAC_MODE, tp->mac_mode);
4317 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4319 tp->link_config.active_speed = current_speed;
4320 tp->link_config.active_duplex = current_duplex;
4322 if (current_link_up != netif_carrier_ok(tp->dev)) {
4323 if (current_link_up)
4324 netif_carrier_on(tp->dev);
4326 netif_carrier_off(tp->dev);
4327 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4329 tg3_link_report(tp);
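/* tg3_serdes_parallel_detect() below runs from the driver timer: once
 * the autoneg grace period (tp->serdes_counter) expires it inspects the
 * PHY's signal-detect and config-word status via the DSP registers and
 * switches between forced 1000FD and autonegotiation as appropriate.
 */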
4334 static void tg3_serdes_parallel_detect(struct tg3 *tp)
4336 if (tp->serdes_counter) {
4337 /* Give autoneg time to complete. */
4338 tp->serdes_counter--;
4339 return;
4340 }
4342 if (!netif_carrier_ok(tp->dev) &&
4343 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
4344 u32 bmcr;
4346 tg3_readphy(tp, MII_BMCR, &bmcr);
4347 if (bmcr & BMCR_ANENABLE) {
4348 u32 phy1, phy2;
4350 /* Select shadow register 0x1f */
4351 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
4352 tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
4354 /* Select expansion interrupt status register */
4355 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
4356 MII_TG3_DSP_EXP1_INT_STAT);
4357 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4358 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4360 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
4361 /* We have signal detect and not receiving
4362 * config code words, link is up by parallel
4363 * detection.
4364 */
4366 bmcr &= ~BMCR_ANENABLE;
4367 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
4368 tg3_writephy(tp, MII_BMCR, bmcr);
4369 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
4372 } else if (netif_carrier_ok(tp->dev) &&
4373 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
4374 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
4375 u32 phy2;
4377 /* Select expansion interrupt status register */
4378 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
4379 MII_TG3_DSP_EXP1_INT_STAT);
4380 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4381 if (phy2 & 0x20) {
4382 u32 bmcr;
4384 /* Config code words received, turn on autoneg. */
4385 tg3_readphy(tp, MII_BMCR, &bmcr);
4386 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
4388 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
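/* tg3_setup_phy() below dispatches to the copper, fiber or fiber-MII
 * handler and then applies post-link housekeeping: the 5784_AX clock
 * prescaler, MAC_TX_LENGTHS slot time, statistics coalescing ticks and
 * the ASPM L1-threshold workaround.
 */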
4394 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
4395 {
4396 u32 val;
4397 int err;
4399 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
4400 err = tg3_setup_fiber_phy(tp, force_reset);
4401 else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4402 err = tg3_setup_fiber_mii_phy(tp, force_reset);
4404 err = tg3_setup_copper_phy(tp, force_reset);
4406 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
4407 u32 scale;
4409 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
4410 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
4411 scale = 65;
4412 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
4413 scale = 6;
4414 else
4415 scale = 12;
4417 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
4418 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
4419 tw32(GRC_MISC_CFG, val);
4422 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
4423 (6 << TX_LENGTHS_IPG_SHIFT);
4424 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
4425 val |= tr32(MAC_TX_LENGTHS) &
4426 (TX_LENGTHS_JMB_FRM_LEN_MSK |
4427 TX_LENGTHS_CNT_DWN_VAL_MSK);
4429 if (tp->link_config.active_speed == SPEED_1000 &&
4430 tp->link_config.active_duplex == DUPLEX_HALF)
4431 tw32(MAC_TX_LENGTHS, val |
4432 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
4434 tw32(MAC_TX_LENGTHS, val |
4435 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
4437 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
4438 if (netif_carrier_ok(tp->dev)) {
4439 tw32(HOSTCC_STAT_COAL_TICKS,
4440 tp->coal.stats_block_coalesce_usecs);
4442 tw32(HOSTCC_STAT_COAL_TICKS, 0);
4446 if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) {
4447 val = tr32(PCIE_PWR_MGMT_THRESH);
4448 if (!netif_carrier_ok(tp->dev))
4449 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
4450 tp->pwrmgmt_thresh;
4451 else
4452 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
4453 tw32(PCIE_PWR_MGMT_THRESH, val);
4454 }
4456 return err;
4457 }
4459 static inline int tg3_irq_sync(struct tg3 *tp)
4461 return tp->irq_sync;
4464 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
4465 {
4466 int i;
4468 dst = (u32 *)((u8 *)dst + off);
4469 for (i = 0; i < len; i += sizeof(u32))
4470 *dst++ = tr32(off + i);
4471 }
4473 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
4475 tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
4476 tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
4477 tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
4478 tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
4479 tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
4480 tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
4481 tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
4482 tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
4483 tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
4484 tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
4485 tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
4486 tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
4487 tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
4488 tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
4489 tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
4490 tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
4491 tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
4492 tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
4493 tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
4495 if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX)
4496 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
4498 tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
4499 tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
4500 tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
4501 tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
4502 tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
4503 tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
4504 tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
4505 tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
4507 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
4508 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
4509 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
4510 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
4513 tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
4514 tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
4515 tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
4516 tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
4517 tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
4519 if (tp->tg3_flags & TG3_FLAG_NVRAM)
4520 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
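/* tg3_dump_state() below is a post-mortem diagnostic helper (typically
 * invoked on tx timeouts): it snapshots the register file, via
 * tg3_dump_legacy_regs() or a linear read on PCIe parts, and prints each
 * NAPI vector's hardware status block and ring indices.
 */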
4523 static void tg3_dump_state(struct tg3 *tp)
4528 regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
4530 netdev_err(tp->dev, "Failed allocating register dump buffer\n");
4531 return;
4532 }
4534 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
4535 /* Read up to but not including private PCI registers */
4536 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
4537 regs[i / sizeof(u32)] = tr32(i);
4539 tg3_dump_legacy_regs(tp, regs);
4541 for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
4542 if (!regs[i + 0] && !regs[i + 1] &&
4543 !regs[i + 2] && !regs[i + 3])
4544 continue;
4546 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
4547 i * 4,
4548 regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
4553 for (i = 0; i < tp->irq_cnt; i++) {
4554 struct tg3_napi *tnapi = &tp->napi[i];
4556 /* SW status block */
4557 netdev_err(tp->dev,
4558 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
4560 tnapi->hw_status->status,
4561 tnapi->hw_status->status_tag,
4562 tnapi->hw_status->rx_jumbo_consumer,
4563 tnapi->hw_status->rx_consumer,
4564 tnapi->hw_status->rx_mini_consumer,
4565 tnapi->hw_status->idx[0].rx_producer,
4566 tnapi->hw_status->idx[0].tx_consumer);
4568 netdev_err(tp->dev,
4569 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
4571 tnapi->last_tag, tnapi->last_irq_tag,
4572 tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
4573 tnapi->rx_rcb_ptr,
4574 tnapi->prodring.rx_std_prod_idx,
4575 tnapi->prodring.rx_std_cons_idx,
4576 tnapi->prodring.rx_jmb_prod_idx,
4577 tnapi->prodring.rx_jmb_cons_idx);
4581 /* This is called whenever we suspect that the system chipset is re-
4582 * ordering the sequence of MMIO to the tx send mailbox. The symptom
4583 * is bogus tx completions. We try to recover by setting the
4584 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
4585 * in the work queue.
4586 */
4587 static void tg3_tx_recover(struct tg3 *tp)
4589 BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
4590 tp->write32_tx_mbox == tg3_write_indirect_mbox);
4592 netdev_warn(tp->dev,
4593 "The system may be re-ordering memory-mapped I/O "
4594 "cycles to the network device, attempting to recover. "
4595 "Please report the problem to the driver maintainer "
4596 "and include system chipset information.\n");
4598 spin_lock(&tp->lock);
4599 tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
4600 spin_unlock(&tp->lock);
4603 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
4605 /* Tell compiler to fetch tx indices from memory. */
4606 barrier();
4607 return tnapi->tx_pending -
4608 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
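/* Worked example with hypothetical indices: for TG3_TX_RING_SIZE = 512
 * and tx_pending = 511, tx_prod = 5 and tx_cons = 510 give
 * (5 - 510) & 511 = 7 descriptors still in flight, so 511 - 7 = 504
 * slots are reported free; the masked unsigned subtraction handles the
 * producer wrapping past the consumer.
 */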
4611 /* Tigon3 never reports partial packet sends. So we do not
4612 * need special logic to handle SKBs that have not had all
4613 * of their frags sent yet, like SunGEM does.
4615 static void tg3_tx(struct tg3_napi *tnapi)
4617 struct tg3 *tp = tnapi->tp;
4618 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
4619 u32 sw_idx = tnapi->tx_cons;
4620 struct netdev_queue *txq;
4621 int index = tnapi - tp->napi;
4623 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
4624 index--;
4626 txq = netdev_get_tx_queue(tp->dev, index);
4628 while (sw_idx != hw_idx) {
4629 struct ring_info *ri = &tnapi->tx_buffers[sw_idx];
4630 struct sk_buff *skb = ri->skb;
4633 if (unlikely(skb == NULL)) {
4634 tg3_tx_recover(tp);
4635 return;
4636 }
4638 pci_unmap_single(tp->pdev,
4639 dma_unmap_addr(ri, mapping),
4640 skb_headlen(skb),
4641 PCI_DMA_TODEVICE);
4643 ri->skb = NULL;
4645 sw_idx = NEXT_TX(sw_idx);
4647 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4648 ri = &tnapi->tx_buffers[sw_idx];
4649 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
4652 pci_unmap_page(tp->pdev,
4653 dma_unmap_addr(ri, mapping),
4654 skb_shinfo(skb)->frags[i].size,
4655 PCI_DMA_TODEVICE);
4656 sw_idx = NEXT_TX(sw_idx);
4657 }
4659 dev_kfree_skb(skb);
4661 if (unlikely(tx_bug)) {
4662 tg3_tx_recover(tp);
4663 return;
4664 }
4667 tnapi->tx_cons = sw_idx;
4669 /* Need to make the tx_cons update visible to tg3_start_xmit()
4670 * before checking for netif_queue_stopped(). Without the
4671 * memory barrier, there is a small possibility that tg3_start_xmit()
4672 * will miss it and cause the queue to be stopped forever.
4673 */
4674 smp_mb();
4676 if (unlikely(netif_tx_queue_stopped(txq) &&
4677 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
4678 __netif_tx_lock(txq, smp_processor_id());
4679 if (netif_tx_queue_stopped(txq) &&
4680 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
4681 netif_tx_wake_queue(txq);
4682 __netif_tx_unlock(txq);
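/* The barrier above pairs with the one tg3_start_xmit() issues after
 * stopping the queue: each side publishes its index update before
 * re-checking the other's, so at least one of them observes the final
 * state and the queue cannot stay stopped while descriptors are free.
 */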
4686 static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
4691 pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
4692 map_sz, PCI_DMA_FROMDEVICE);
4693 dev_kfree_skb_any(ri->skb);
4697 /* Returns size of skb allocated or < 0 on error.
4699 * We only need to fill in the address because the other members
4700 * of the RX descriptor are invariant, see tg3_init_rings.
4702 * Note the purposeful asymmetry of cpu vs. chip accesses. For
4703 * posting buffers we only dirty the first cache line of the RX
4704 * descriptor (containing the address). Whereas for the RX status
4705 * buffers the cpu only reads the last cacheline of the RX descriptor
4706 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
4708 static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
4709 u32 opaque_key, u32 dest_idx_unmasked)
4711 struct tg3_rx_buffer_desc *desc;
4712 struct ring_info *map;
4713 struct sk_buff *skb;
4715 int skb_size, dest_idx;
4717 switch (opaque_key) {
4718 case RXD_OPAQUE_RING_STD:
4719 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
4720 desc = &tpr->rx_std[dest_idx];
4721 map = &tpr->rx_std_buffers[dest_idx];
4722 skb_size = tp->rx_pkt_map_sz;
4725 case RXD_OPAQUE_RING_JUMBO:
4726 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
4727 desc = &tpr->rx_jmb[dest_idx].std;
4728 map = &tpr->rx_jmb_buffers[dest_idx];
4729 skb_size = TG3_RX_JMB_MAP_SZ;
4736 /* Do not overwrite any of the map or rp information
4737 * until we are sure we can commit to a new buffer.
4739 * Callers depend upon this behavior and assume that
4740 * we leave everything unchanged if we fail.
4742 skb = netdev_alloc_skb(tp->dev, skb_size + tp->rx_offset);
4743 if (skb == NULL)
4744 return -ENOMEM;
4746 skb_reserve(skb, tp->rx_offset);
4748 mapping = pci_map_single(tp->pdev, skb->data, skb_size,
4749 PCI_DMA_FROMDEVICE);
4750 if (pci_dma_mapping_error(tp->pdev, mapping)) {
4751 dev_kfree_skb(skb);
4752 return -EIO;
4753 }
4755 map->skb = skb;
4756 dma_unmap_addr_set(map, mapping, mapping);
4758 desc->addr_hi = ((u64)mapping >> 32);
4759 desc->addr_lo = ((u64)mapping & 0xffffffff);
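/* For example, a dma_addr_t of 0x0000000123456780 is stored as
 * addr_hi = 0x00000001 and addr_lo = 0x23456780; the descriptor keeps
 * the bus address as two 32-bit halves regardless of host word size.
 */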
4764 /* We only need to copy over the address because the other
4765 * members of the RX descriptor are invariant. See notes above
4766 * tg3_alloc_rx_skb for full details.
4768 static void tg3_recycle_rx(struct tg3_napi *tnapi,
4769 struct tg3_rx_prodring_set *dpr,
4770 u32 opaque_key, int src_idx,
4771 u32 dest_idx_unmasked)
4773 struct tg3 *tp = tnapi->tp;
4774 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
4775 struct ring_info *src_map, *dest_map;
4776 struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
4779 switch (opaque_key) {
4780 case RXD_OPAQUE_RING_STD:
4781 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
4782 dest_desc = &dpr->rx_std[dest_idx];
4783 dest_map = &dpr->rx_std_buffers[dest_idx];
4784 src_desc = &spr->rx_std[src_idx];
4785 src_map = &spr->rx_std_buffers[src_idx];
4788 case RXD_OPAQUE_RING_JUMBO:
4789 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
4790 dest_desc = &dpr->rx_jmb[dest_idx].std;
4791 dest_map = &dpr->rx_jmb_buffers[dest_idx];
4792 src_desc = &spr->rx_jmb[src_idx].std;
4793 src_map = &spr->rx_jmb_buffers[src_idx];
4800 dest_map->skb = src_map->skb;
4801 dma_unmap_addr_set(dest_map, mapping,
4802 dma_unmap_addr(src_map, mapping));
4803 dest_desc->addr_hi = src_desc->addr_hi;
4804 dest_desc->addr_lo = src_desc->addr_lo;
4806 /* Ensure that the update to the skb happens after the physical
4807 * addresses have been transferred to the new BD location.
4808 */
4809 smp_wmb();
4811 src_map->skb = NULL;
4814 /* The RX ring scheme is composed of multiple rings which post fresh
4815 * buffers to the chip, and one special ring the chip uses to report
4816 * status back to the host.
4818 * The special ring reports the status of received packets to the
4819 * host. The chip does not write into the original descriptor the
4820 * RX buffer was obtained from. The chip simply takes the original
4821 * descriptor as provided by the host, updates the status and length
4822 * field, then writes this into the next status ring entry.
4824 * Each ring the host uses to post buffers to the chip is described
4825 * by a TG3_BDINFO entry in the chip's SRAM area. When a packet arrives,
4826 * it is first placed into the on-chip ram. When the packet's length
4827 * is known, it walks down the TG3_BDINFO entries to select the ring.
4828 * Each TG3_BDINFO specifies a MAXLEN field, and the first TG3_BDINFO
4829 * whose MAXLEN covers the new packet's length is chosen.
4831 * The "separate ring for rx status" scheme may sound queer, but it makes
4832 * sense from a cache coherency perspective. If only the host writes
4833 * to the buffer post rings, and only the chip writes to the rx status
4834 * rings, then cache lines never move beyond shared-modified state.
4835 * If both the host and chip were to write into the same ring, cache line
4836 * eviction could occur since both entities want it in an exclusive state.
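/* A rough sketch of the scheme described above:
 *
 *   host --(empty buffers)------> std/jumbo producer rings
 *   chip --(len/flags/opaque)---> rx return (status) ring
 *   host --(opaque cookie)------> original ring_info entry
 *
 * Each ring has a single writer, which is exactly the cache-friendly
 * property the preceding comment argues for.
 */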
4838 static int tg3_rx(struct tg3_napi *tnapi, int budget)
4840 struct tg3 *tp = tnapi->tp;
4841 u32 work_mask, rx_std_posted = 0;
4842 u32 std_prod_idx, jmb_prod_idx;
4843 u32 sw_idx = tnapi->rx_rcb_ptr;
4846 struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
4848 hw_idx = *(tnapi->rx_rcb_prod_idx);
4849 /*
4850 * We need to order the read of hw_idx and the read of
4851 * the opaque cookie.
4852 */
4853 rmb();
4854 work_mask = 0;
4855 received = 0;
4856 std_prod_idx = tpr->rx_std_prod_idx;
4857 jmb_prod_idx = tpr->rx_jmb_prod_idx;
4858 while (sw_idx != hw_idx && budget > 0) {
4859 struct ring_info *ri;
4860 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
4862 struct sk_buff *skb;
4863 dma_addr_t dma_addr;
4864 u32 opaque_key, desc_idx, *post_ptr;
4866 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
4867 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
4868 if (opaque_key == RXD_OPAQUE_RING_STD) {
4869 ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
4870 dma_addr = dma_unmap_addr(ri, mapping);
4872 post_ptr = &std_prod_idx;
4874 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
4875 ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
4876 dma_addr = dma_unmap_addr(ri, mapping);
4878 post_ptr = &jmb_prod_idx;
4880 goto next_pkt_nopost;
4882 work_mask |= opaque_key;
4884 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
4885 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
4887 tg3_recycle_rx(tnapi, tpr, opaque_key,
4888 desc_idx, *post_ptr);
4890 /* Other statistics kept track of by card. */
4895 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
4898 if (len > TG3_RX_COPY_THRESH(tp)) {
4901 skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key,
4906 pci_unmap_single(tp->pdev, dma_addr, skb_size,
4907 PCI_DMA_FROMDEVICE);
4909 /* Ensure that the update to the skb happens
4910 * after the usage of the old DMA mapping.
4918 struct sk_buff *copy_skb;
4920 tg3_recycle_rx(tnapi, tpr, opaque_key,
4921 desc_idx, *post_ptr);
4923 copy_skb = netdev_alloc_skb(tp->dev, len +
4925 if (copy_skb == NULL)
4926 goto drop_it_no_recycle;
4928 skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
4929 skb_put(copy_skb, len);
4930 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4931 skb_copy_from_linear_data(skb, copy_skb->data, len);
4932 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4934 /* We'll reuse the original ring buffer. */
4938 if ((tp->dev->features & NETIF_F_RXCSUM) &&
4939 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
4940 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
4941 >> RXD_TCPCSUM_SHIFT) == 0xffff))
4942 skb->ip_summed = CHECKSUM_UNNECESSARY;
4944 skb_checksum_none_assert(skb);
4946 skb->protocol = eth_type_trans(skb, tp->dev);
4948 if (len > (tp->dev->mtu + ETH_HLEN) &&
4949 skb->protocol != htons(ETH_P_8021Q)) {
4951 goto drop_it_no_recycle;
4954 if (desc->type_flags & RXD_FLAG_VLAN &&
4955 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
4956 __vlan_hwaccel_put_tag(skb,
4957 desc->err_vlan & RXD_VLAN_MASK);
4959 napi_gro_receive(&tnapi->napi, skb);
4967 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
4968 tpr->rx_std_prod_idx = std_prod_idx &
4969 tp->rx_std_ring_mask;
4970 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
4971 tpr->rx_std_prod_idx);
4972 work_mask &= ~RXD_OPAQUE_RING_STD;
4977 sw_idx &= tp->rx_ret_ring_mask;
4979 /* Refresh hw_idx to see if there is new work */
4980 if (sw_idx == hw_idx) {
4981 hw_idx = *(tnapi->rx_rcb_prod_idx);
4982 rmb();
4983 }
4984 }
4986 /* ACK the status ring. */
4987 tnapi->rx_rcb_ptr = sw_idx;
4988 tw32_rx_mbox(tnapi->consmbox, sw_idx);
4990 /* Refill RX ring(s). */
4991 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS)) {
4992 if (work_mask & RXD_OPAQUE_RING_STD) {
4993 tpr->rx_std_prod_idx = std_prod_idx &
4994 tp->rx_std_ring_mask;
4995 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
4996 tpr->rx_std_prod_idx);
4998 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
4999 tpr->rx_jmb_prod_idx = jmb_prod_idx &
5000 tp->rx_jmb_ring_mask;
5001 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5002 tpr->rx_jmb_prod_idx);
5005 } else if (work_mask) {
5006 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
5007 * updated before the producer indices can be updated.
5008 */
5009 smp_wmb();
5011 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
5012 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
5014 if (tnapi != &tp->napi[1])
5015 napi_schedule(&tp->napi[1].napi);
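/* With RSS, only tp->napi[0]'s producer rings are visible to the chip;
 * the per-vector prodrings act as staging areas, and napi[1] is kicked
 * here to consolidate them via tg3_rx_prodring_xfer() (see
 * tg3_poll_work() below) before the mailboxes are written.
 */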
5021 static void tg3_poll_link(struct tg3 *tp)
5023 /* handle link change and other phy events */
5024 if (!(tp->tg3_flags &
5025 (TG3_FLAG_USE_LINKCHG_REG |
5026 TG3_FLAG_POLL_SERDES))) {
5027 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
5029 if (sblk->status & SD_STATUS_LINK_CHG) {
5030 sblk->status = SD_STATUS_UPDATED |
5031 (sblk->status & ~SD_STATUS_LINK_CHG);
5032 spin_lock(&tp->lock);
5033 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
5034 tw32_f(MAC_STATUS,
5035 (MAC_STATUS_SYNC_CHANGED |
5036 MAC_STATUS_CFG_CHANGED |
5037 MAC_STATUS_MI_COMPLETION |
5038 MAC_STATUS_LNKSTATE_CHANGED));
5039 udelay(40);
5040 } else
5041 tg3_setup_phy(tp, 0);
5042 spin_unlock(&tp->lock);
5047 static int tg3_rx_prodring_xfer(struct tg3 *tp,
5048 struct tg3_rx_prodring_set *dpr,
5049 struct tg3_rx_prodring_set *spr)
5051 u32 si, di, cpycnt, src_prod_idx;
5055 src_prod_idx = spr->rx_std_prod_idx;
5057 /* Make sure updates to the rx_std_buffers[] entries and the
5058 * standard producer index are seen in the correct order.
5059 */
5060 smp_rmb();
5062 if (spr->rx_std_cons_idx == src_prod_idx)
5065 if (spr->rx_std_cons_idx < src_prod_idx)
5066 cpycnt = src_prod_idx - spr->rx_std_cons_idx;
5068 cpycnt = tp->rx_std_ring_mask + 1 -
5069 spr->rx_std_cons_idx;
5071 cpycnt = min(cpycnt,
5072 tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
5074 si = spr->rx_std_cons_idx;
5075 di = dpr->rx_std_prod_idx;
5077 for (i = di; i < di + cpycnt; i++) {
5078 if (dpr->rx_std_buffers[i].skb) {
5088 /* Ensure that updates to the rx_std_buffers ring and the
5089 * shadowed hardware producer ring from tg3_recycle_skb() are
5090 * ordered correctly WRT the skb check above.
5091 */
5092 smp_rmb();
5094 memcpy(&dpr->rx_std_buffers[di],
5095 &spr->rx_std_buffers[si],
5096 cpycnt * sizeof(struct ring_info));
5098 for (i = 0; i < cpycnt; i++, di++, si++) {
5099 struct tg3_rx_buffer_desc *sbd, *dbd;
5100 sbd = &spr->rx_std[si];
5101 dbd = &dpr->rx_std[di];
5102 dbd->addr_hi = sbd->addr_hi;
5103 dbd->addr_lo = sbd->addr_lo;
5106 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
5107 tp->rx_std_ring_mask;
5108 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
5109 tp->rx_std_ring_mask;
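/* Worked example with hypothetical indices: for rx_std_ring_mask = 511,
 * a source consumer of 508 and producer of 4 first copies
 * 512 - 508 = 4 entries up to the wrap point, then 4 more on the next
 * pass; the min() against the destination's remaining span keeps the
 * copy from overrunning dpr.
 */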
5113 src_prod_idx = spr->rx_jmb_prod_idx;
5115 /* Make sure updates to the rx_jmb_buffers[] entries and
5116 * the jumbo producer index are seen in the correct order.
5117 */
5118 smp_rmb();
5120 if (spr->rx_jmb_cons_idx == src_prod_idx)
5123 if (spr->rx_jmb_cons_idx < src_prod_idx)
5124 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
5126 cpycnt = tp->rx_jmb_ring_mask + 1 -
5127 spr->rx_jmb_cons_idx;
5129 cpycnt = min(cpycnt,
5130 tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
5132 si = spr->rx_jmb_cons_idx;
5133 di = dpr->rx_jmb_prod_idx;
5135 for (i = di; i < di + cpycnt; i++) {
5136 if (dpr->rx_jmb_buffers[i].skb) {
5146 /* Ensure that updates to the rx_jmb_buffers ring and the
5147 * shadowed hardware producer ring from tg3_recycle_skb() are
5148 * ordered correctly WRT the skb check above.
5149 */
5150 smp_rmb();
5152 memcpy(&dpr->rx_jmb_buffers[di],
5153 &spr->rx_jmb_buffers[si],
5154 cpycnt * sizeof(struct ring_info));
5156 for (i = 0; i < cpycnt; i++, di++, si++) {
5157 struct tg3_rx_buffer_desc *sbd, *dbd;
5158 sbd = &spr->rx_jmb[si].std;
5159 dbd = &dpr->rx_jmb[di].std;
5160 dbd->addr_hi = sbd->addr_hi;
5161 dbd->addr_lo = sbd->addr_lo;
5164 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
5165 tp->rx_jmb_ring_mask;
5166 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
5167 tp->rx_jmb_ring_mask;
5173 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
5175 struct tg3 *tp = tnapi->tp;
5177 /* run TX completion thread */
5178 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
5179 tg3_tx(tnapi);
5180 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
5181 return work_done;
5182 }
5184 /* run RX thread, within the bounds set by NAPI.
5185 * All RX "locking" is done by ensuring outside
5186 * code synchronizes with tg3->napi.poll()
5188 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
5189 work_done += tg3_rx(tnapi, budget - work_done);
5191 if ((tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS) && tnapi == &tp->napi[1]) {
5192 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
5194 u32 std_prod_idx = dpr->rx_std_prod_idx;
5195 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
5197 for (i = 1; i < tp->irq_cnt; i++)
5198 err |= tg3_rx_prodring_xfer(tp, dpr,
5199 &tp->napi[i].prodring);
5203 if (std_prod_idx != dpr->rx_std_prod_idx)
5204 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5205 dpr->rx_std_prod_idx);
5207 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
5208 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5209 dpr->rx_jmb_prod_idx);
5214 tw32_f(HOSTCC_MODE, tp->coal_now);
5220 static int tg3_poll_msix(struct napi_struct *napi, int budget)
5222 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
5223 struct tg3 *tp = tnapi->tp;
5225 struct tg3_hw_status *sblk = tnapi->hw_status;
5228 work_done = tg3_poll_work(tnapi, work_done, budget);
5230 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
5231 goto tx_recovery;
5233 if (unlikely(work_done >= budget))
5234 break;
5236 /* tp->last_tag is used in tg3_int_reenable() below
5237 * to tell the hw how much work has been processed,
5238 * so we must read it before checking for more work.
5240 tnapi->last_tag = sblk->status_tag;
5241 tnapi->last_irq_tag = tnapi->last_tag;
5242 rmb();
5244 /* check for RX/TX work to do */
5245 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
5246 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
5247 napi_complete(napi);
5248 /* Reenable interrupts. */
5249 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
5258 /* work_done is guaranteed to be less than budget. */
5259 napi_complete(napi);
5260 schedule_work(&tp->reset_task);
5264 static void tg3_process_error(struct tg3 *tp)
5267 bool real_error = false;
5269 if (tp->tg3_flags & TG3_FLAG_ERROR_PROCESSED)
5272 /* Check Flow Attention register */
5273 val = tr32(HOSTCC_FLOW_ATTN);
5274 if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
5275 netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
5279 if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
5280 netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
5284 if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
5285 netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
5294 tp->tg3_flags |= TG3_FLAG_ERROR_PROCESSED;
5295 schedule_work(&tp->reset_task);
5298 static int tg3_poll(struct napi_struct *napi, int budget)
5300 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
5301 struct tg3 *tp = tnapi->tp;
5303 struct tg3_hw_status *sblk = tnapi->hw_status;
5306 if (sblk->status & SD_STATUS_ERROR)
5307 tg3_process_error(tp);
5311 work_done = tg3_poll_work(tnapi, work_done, budget);
5313 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
5314 goto tx_recovery;
5316 if (unlikely(work_done >= budget))
5317 break;
5319 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
5320 /* tp->last_tag is used in tg3_int_reenable() below
5321 * to tell the hw how much work has been processed,
5322 * so we must read it before checking for more work.
5324 tnapi->last_tag = sblk->status_tag;
5325 tnapi->last_irq_tag = tnapi->last_tag;
5326 rmb();
5327 } else
5328 sblk->status &= ~SD_STATUS_UPDATED;
5330 if (likely(!tg3_has_work(tnapi))) {
5331 napi_complete(napi);
5332 tg3_int_reenable(tnapi);
5340 /* work_done is guaranteed to be less than budget. */
5341 napi_complete(napi);
5342 schedule_work(&tp->reset_task);
5346 static void tg3_napi_disable(struct tg3 *tp)
5350 for (i = tp->irq_cnt - 1; i >= 0; i--)
5351 napi_disable(&tp->napi[i].napi);
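/* Disabling in reverse vector order presumably lets the RX/TX vectors
 * quiesce before vector 0, which also fields link and error events;
 * tg3_napi_enable() below restores them in the opposite order.
 */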
5354 static void tg3_napi_enable(struct tg3 *tp)
5358 for (i = 0; i < tp->irq_cnt; i++)
5359 napi_enable(&tp->napi[i].napi);
5362 static void tg3_napi_init(struct tg3 *tp)
5366 netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
5367 for (i = 1; i < tp->irq_cnt; i++)
5368 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
5371 static void tg3_napi_fini(struct tg3 *tp)
5375 for (i = 0; i < tp->irq_cnt; i++)
5376 netif_napi_del(&tp->napi[i].napi);
5379 static inline void tg3_netif_stop(struct tg3 *tp)
5381 tp->dev->trans_start = jiffies; /* prevent tx timeout */
5382 tg3_napi_disable(tp);
5383 netif_tx_disable(tp->dev);
5386 static inline void tg3_netif_start(struct tg3 *tp)
5388 /* NOTE: unconditional netif_tx_wake_all_queues is only
5389 * appropriate so long as all callers are assured to
5390 * have free tx slots (such as after tg3_init_hw)
5392 netif_tx_wake_all_queues(tp->dev);
5394 tg3_napi_enable(tp);
5395 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
5396 tg3_enable_ints(tp);
5399 static void tg3_irq_quiesce(struct tg3 *tp)
5403 BUG_ON(tp->irq_sync);
5408 for (i = 0; i < tp->irq_cnt; i++)
5409 synchronize_irq(tp->napi[i].irq_vec);
5412 /* Fully shutdown all tg3 driver activity elsewhere in the system.
5413 * If irq_sync is non-zero, the IRQ handlers must be quiesced as
5414 * well. Most of the time this is only necessary when
5415 * shutting down the device.
5417 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
5419 spin_lock_bh(&tp->lock);
5421 tg3_irq_quiesce(tp);
5424 static inline void tg3_full_unlock(struct tg3 *tp)
5426 spin_unlock_bh(&tp->lock);
5429 /* One-shot MSI handler - Chip automatically disables interrupt
5430 * after sending MSI so driver doesn't have to do it.
5432 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
5434 struct tg3_napi *tnapi = dev_id;
5435 struct tg3 *tp = tnapi->tp;
5437 prefetch(tnapi->hw_status);
5439 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5441 if (likely(!tg3_irq_sync(tp)))
5442 napi_schedule(&tnapi->napi);
5447 /* MSI ISR - No need to check for interrupt sharing and no need to
5448 * flush status block and interrupt mailbox. PCI ordering rules
5449 * guarantee that MSI will arrive after the status block.
5451 static irqreturn_t tg3_msi(int irq, void *dev_id)
5453 struct tg3_napi *tnapi = dev_id;
5454 struct tg3 *tp = tnapi->tp;
5456 prefetch(tnapi->hw_status);
5458 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5460 * Writing any value to intr-mbox-0 clears PCI INTA# and
5461 * chip-internal interrupt pending events.
5462 * Writing non-zero to intr-mbox-0 additionally tells the
5463 * NIC to stop sending us irqs, engaging "in-intr-handler"
5466 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5467 if (likely(!tg3_irq_sync(tp)))
5468 napi_schedule(&tnapi->napi);
5470 return IRQ_RETVAL(1);
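/* MSI is never shared with another device, so this handler can report
 * IRQ_RETVAL(1) unconditionally; the INTx handlers below must instead
 * consult PCISTATE to decide whether the interrupt was really ours.
 */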
5473 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
5475 struct tg3_napi *tnapi = dev_id;
5476 struct tg3 *tp = tnapi->tp;
5477 struct tg3_hw_status *sblk = tnapi->hw_status;
5478 unsigned int handled = 1;
5480 /* In INTx mode, it is possible for the interrupt to arrive at
5481 * the CPU before the status block posted prior to the interrupt.
5482 * Reading the PCI State register will confirm whether the
5483 * interrupt is ours and will flush the status block.
5485 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
5486 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
5487 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5494 * Writing any value to intr-mbox-0 clears PCI INTA# and
5495 * chip-internal interrupt pending events.
5496 * Writing non-zero to intr-mbox-0 additionally tells the
5497 * NIC to stop sending us irqs, engaging "in-intr-handler"
5500 * Flush the mailbox to de-assert the IRQ immediately to prevent
5501 * spurious interrupts. The flush impacts performance but
5502 * excessive spurious interrupts can be worse in some cases.
5504 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5505 if (tg3_irq_sync(tp))
5507 sblk->status &= ~SD_STATUS_UPDATED;
5508 if (likely(tg3_has_work(tnapi))) {
5509 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5510 napi_schedule(&tnapi->napi);
5512 /* No work, shared interrupt perhaps? re-enable
5513 * interrupts, and flush that PCI write
5515 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
5519 return IRQ_RETVAL(handled);
5522 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
5524 struct tg3_napi *tnapi = dev_id;
5525 struct tg3 *tp = tnapi->tp;
5526 struct tg3_hw_status *sblk = tnapi->hw_status;
5527 unsigned int handled = 1;
5529 /* In INTx mode, it is possible for the interrupt to arrive at
5530 * the CPU before the status block posted prior to the interrupt.
5531 * Reading the PCI State register will confirm whether the
5532 * interrupt is ours and will flush the status block.
5534 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
5535 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
5536 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5543 * writing any value to intr-mbox-0 clears PCI INTA# and
5544 * chip-internal interrupt pending events.
5545 * writing non-zero to intr-mbox-0 additionally tells the
5546 * NIC to stop sending us irqs, engaging "in-intr-handler"
5549 * Flush the mailbox to de-assert the IRQ immediately to prevent
5550 * spurious interrupts. The flush impacts performance but
5551 * excessive spurious interrupts can be worse in some cases.
5553 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5556 * In a shared interrupt configuration, sometimes other devices'
5557 * interrupts will scream. We record the current status tag here
5558 * so that the above check can report that the screaming interrupts
5559 * are unhandled. Eventually they will be silenced.
5561 tnapi->last_irq_tag = sblk->status_tag;
5563 if (tg3_irq_sync(tp))
5566 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5568 napi_schedule(&tnapi->napi);
5571 return IRQ_RETVAL(handled);
5574 /* ISR for interrupt test */
5575 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
5577 struct tg3_napi *tnapi = dev_id;
5578 struct tg3 *tp = tnapi->tp;
5579 struct tg3_hw_status *sblk = tnapi->hw_status;
5581 if ((sblk->status & SD_STATUS_UPDATED) ||
5582 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5583 tg3_disable_ints(tp);
5584 return IRQ_RETVAL(1);
5586 return IRQ_RETVAL(0);
5589 static int tg3_init_hw(struct tg3 *, int);
5590 static int tg3_halt(struct tg3 *, int, int);
5592 /* Restart hardware after configuration changes, self-test, etc.
5593 * Invoked with tp->lock held.
5595 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
5596 __releases(tp->lock)
5597 __acquires(tp->lock)
5601 err = tg3_init_hw(tp, reset_phy);
5604 "Failed to re-initialize device, aborting\n");
5605 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
5606 tg3_full_unlock(tp);
5607 del_timer_sync(&tp->timer);
5609 tg3_napi_enable(tp);
5611 tg3_full_lock(tp, 0);
5616 #ifdef CONFIG_NET_POLL_CONTROLLER
5617 static void tg3_poll_controller(struct net_device *dev)
5620 struct tg3 *tp = netdev_priv(dev);
5622 for (i = 0; i < tp->irq_cnt; i++)
5623 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
5627 static void tg3_reset_task(struct work_struct *work)
5629 struct tg3 *tp = container_of(work, struct tg3, reset_task);
5631 unsigned int restart_timer;
5633 tg3_full_lock(tp, 0);
5635 if (!netif_running(tp->dev)) {
5636 tg3_full_unlock(tp);
5640 tg3_full_unlock(tp);
5646 tg3_full_lock(tp, 1);
5648 restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
5649 tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
5651 if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
5652 tp->write32_tx_mbox = tg3_write32_tx_mbox;
5653 tp->write32_rx_mbox = tg3_write_flush_reg32;
5654 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
5655 tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
5658 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
5659 err = tg3_init_hw(tp, 1);
5663 tg3_netif_start(tp);
5666 mod_timer(&tp->timer, jiffies + 1);
5669 tg3_full_unlock(tp);
5675 static void tg3_tx_timeout(struct net_device *dev)
5677 struct tg3 *tp = netdev_priv(dev);
5679 if (netif_msg_tx_err(tp)) {
5680 netdev_err(dev, "transmit timed out, resetting\n");
5684 schedule_work(&tp->reset_task);
5687 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
5688 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
5690 u32 base = (u32) mapping & 0xffffffff;
5692 return (base > 0xffffdcc0) && (base + len + 8 < base);
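/* Worked example: base = 0xffffff00 with len = 0x1000 gives
 * base + len + 8 = 0x100000f08, which truncates to 0xf08 < base in
 * 32 bits, so the buffer straddles a 4GB boundary. The 0xffffdcc0
 * comparison presumably just pre-filters bases that cannot wrap even
 * with a maximum-length frame.
 */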
5695 /* Test for DMA addresses > 40-bit */
5696 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
5699 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
5700 if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
5701 return ((u64) mapping + len) > DMA_BIT_MASK(40);
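/* DMA_BIT_MASK(40) is 0xffffffffff, so this trips whenever the end of
 * the mapping lies above the 1TB mark; the test is compiled in only
 * for configurations where such bus addresses can actually occur.
 */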
5708 static void tg3_set_txd(struct tg3_napi *, int, dma_addr_t, int, u32, u32);
5710 /* Work around 4GB and 40-bit hardware DMA bugs. */
5711 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
5712 struct sk_buff *skb, u32 last_plus_one,
5713 u32 *start, u32 base_flags, u32 mss)
5715 struct tg3 *tp = tnapi->tp;
5716 struct sk_buff *new_skb;
5717 dma_addr_t new_addr = 0;
5721 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
5722 new_skb = skb_copy(skb, GFP_ATOMIC);
5724 int more_headroom = 4 - ((unsigned long)skb->data & 3);
5726 new_skb = skb_copy_expand(skb,
5727 skb_headroom(skb) + more_headroom,
5728 skb_tailroom(skb), GFP_ATOMIC);
5734 /* New SKB is guaranteed to be linear. */
5736 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
5738 /* Make sure the mapping succeeded */
5739 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
5741 dev_kfree_skb(new_skb);
5744 /* Make sure new skb does not cross any 4G boundaries.
5745 * Drop the packet if it does.
5747 } else if ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) &&
5748 tg3_4g_overflow_test(new_addr, new_skb->len)) {
5749 pci_unmap_single(tp->pdev, new_addr, new_skb->len,
5752 dev_kfree_skb(new_skb);
5755 tg3_set_txd(tnapi, entry, new_addr, new_skb->len,
5756 base_flags, 1 | (mss << 1));
5757 *start = NEXT_TX(entry);
5761 /* Now clean up the sw ring entries. */
5763 while (entry != last_plus_one) {
5767 len = skb_headlen(skb);
5769 len = skb_shinfo(skb)->frags[i-1].size;
5771 pci_unmap_single(tp->pdev,
5772 dma_unmap_addr(&tnapi->tx_buffers[entry],
5774 len, PCI_DMA_TODEVICE);
5776 tnapi->tx_buffers[entry].skb = new_skb;
5777 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
5780 tnapi->tx_buffers[entry].skb = NULL;
5782 entry = NEXT_TX(entry);
5791 static void tg3_set_txd(struct tg3_napi *tnapi, int entry,
5792 dma_addr_t mapping, int len, u32 flags,
5795 struct tg3_tx_buffer_desc *txd = &tnapi->tx_ring[entry];
5796 int is_end = (mss_and_is_end & 0x1);
5797 u32 mss = (mss_and_is_end >> 1);
5801 flags |= TXD_FLAG_END;
5802 if (flags & TXD_FLAG_VLAN) {
5803 vlan_tag = flags >> 16;
5806 vlan_tag |= (mss << TXD_MSS_SHIFT);
5808 txd->addr_hi = ((u64) mapping >> 32);
5809 txd->addr_lo = ((u64) mapping & 0xffffffff);
5810 txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
5811 txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
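/* Example call with hypothetical arguments:
 * tg3_set_txd(tnapi, entry, mapping, len, flags, (i == last) | (mss << 1))
 * packs the "last fragment" flag into bit 0 and the MSS into the upper
 * bits, mirroring the & 0x1 and >> 1 unpacking above.
 */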
5814 /* hard_start_xmit for devices that don't have any bugs and
5815 * support TG3_FLG2_HW_TSO_2 and TG3_FLG2_HW_TSO_3 only.
5817 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
5818 struct net_device *dev)
5820 struct tg3 *tp = netdev_priv(dev);
5821 u32 len, entry, base_flags, mss;
5823 struct tg3_napi *tnapi;
5824 struct netdev_queue *txq;
5825 unsigned int i, last;
5827 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
5828 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
5829 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
5830 tnapi++;
5832 /* We are running in BH disabled context with netif_tx_lock
5833 * and TX reclaim runs via tp->napi.poll inside of a software
5834 * interrupt. Furthermore, IRQ processing runs lockless so we have
5835 * no IRQ context deadlocks to worry about either. Rejoice!
5837 if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) {
5838 if (!netif_tx_queue_stopped(txq)) {
5839 netif_tx_stop_queue(txq);
5841 /* This is a hard error, log it. */
5843 "BUG! Tx Ring full when queue awake!\n");
5845 return NETDEV_TX_BUSY;
5848 entry = tnapi->tx_prod;
5850 mss = skb_shinfo(skb)->gso_size;
5852 int tcp_opt_len, ip_tcp_len;
5855 if (skb_header_cloned(skb) &&
5856 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
5861 if (skb_is_gso_v6(skb)) {
5862 hdrlen = skb_headlen(skb) - ETH_HLEN;
5864 struct iphdr *iph = ip_hdr(skb);
5866 tcp_opt_len = tcp_optlen(skb);
5867 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
5870 iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
5871 hdrlen = ip_tcp_len + tcp_opt_len;
5874 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) {
5875 mss |= (hdrlen & 0xc) << 12;
5877 base_flags |= 0x00000010;
5878 base_flags |= (hdrlen & 0x3e0) << 5;
5882 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
5883 TXD_FLAG_CPU_POST_DMA);
5885 tcp_hdr(skb)->check = 0;
5887 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
5888 base_flags |= TXD_FLAG_TCPUDP_CSUM;
5891 if (vlan_tx_tag_present(skb))
5892 base_flags |= (TXD_FLAG_VLAN |
5893 (vlan_tx_tag_get(skb) << 16));
5895 len = skb_headlen(skb);
5897 /* Queue skb data, a.k.a. the main skb fragment. */
5898 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
5899 if (pci_dma_mapping_error(tp->pdev, mapping)) {
5904 tnapi->tx_buffers[entry].skb = skb;
5905 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
5907 if ((tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) &&
5908 !mss && skb->len > VLAN_ETH_FRAME_LEN)
5909 base_flags |= TXD_FLAG_JMB_PKT;
5911 tg3_set_txd(tnapi, entry, mapping, len, base_flags,
5912 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
5914 entry = NEXT_TX(entry);
5916 /* Now loop through additional data fragments, and queue them. */
5917 if (skb_shinfo(skb)->nr_frags > 0) {
5918 last = skb_shinfo(skb)->nr_frags - 1;
5919 for (i = 0; i <= last; i++) {
5920 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5923 mapping = pci_map_page(tp->pdev,
5926 len, PCI_DMA_TODEVICE);
5927 if (pci_dma_mapping_error(tp->pdev, mapping))
5930 tnapi->tx_buffers[entry].skb = NULL;
5931 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
5934 tg3_set_txd(tnapi, entry, mapping, len,
5935 base_flags, (i == last) | (mss << 1));
5937 entry = NEXT_TX(entry);
5941 /* Packets are ready, update Tx producer idx local and on card. */
5942 tw32_tx_mbox(tnapi->prodmbox, entry);
5944 tnapi->tx_prod = entry;
5945 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
5946 netif_tx_stop_queue(txq);
5948 /* netif_tx_stop_queue() must be done before checking
5949 * tx index in tg3_tx_avail() below, because in
5950 * tg3_tx(), we update tx index before checking for
5951 * netif_tx_queue_stopped().
5954 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
5955 netif_tx_wake_queue(txq);
5961 return NETDEV_TX_OK;
5965 entry = tnapi->tx_prod;
5966 tnapi->tx_buffers[entry].skb = NULL;
5967 pci_unmap_single(tp->pdev,
5968 dma_unmap_addr(&tnapi->tx_buffers[entry], mapping),
5971 for (i = 0; i <= last; i++) {
5972 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5973 entry = NEXT_TX(entry);
5975 pci_unmap_page(tp->pdev,
5976 dma_unmap_addr(&tnapi->tx_buffers[entry],
5978 frag->size, PCI_DMA_TODEVICE);
5982 return NETDEV_TX_OK;
5985 static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *,
5986 struct net_device *);
5988 /* Use GSO to work around a rare TSO bug that may be triggered when the
5989 * TSO header is greater than 80 bytes.
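/* For example, 14 bytes of Ethernet header plus a 20-byte IPv4 header
 * and a 60-byte TCP header (maximal options) total 94 bytes, which
 * exceeds the 80-byte limit tested in tg3_start_xmit_dma_bug() and so
 * takes this software-segmentation path.
 */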
5991 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
5993 struct sk_buff *segs, *nskb;
5994 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
5996 /* Estimate the number of fragments in the worst case */
5997 if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
5998 netif_stop_queue(tp->dev);
6000 /* netif_tx_stop_queue() must be done before checking
6001 * tx index in tg3_tx_avail() below, because in
6002 * tg3_tx(), we update tx index before checking for
6003 * netif_tx_queue_stopped().
6006 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
6007 return NETDEV_TX_BUSY;
6009 netif_wake_queue(tp->dev);
6012 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
6013 if (IS_ERR(segs))
6014 goto tg3_tso_bug_end;
6020 tg3_start_xmit_dma_bug(nskb, tp->dev);
6026 return NETDEV_TX_OK;
6029 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
6030 * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
6032 static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
6033 struct net_device *dev)
6035 struct tg3 *tp = netdev_priv(dev);
6036 u32 len, entry, base_flags, mss;
6037 int would_hit_hwbug;
6039 struct tg3_napi *tnapi;
6040 struct netdev_queue *txq;
6041 unsigned int i, last;
6043 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
6044 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
6045 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
6046 tnapi++;
6048 /* We are running in BH disabled context with netif_tx_lock
6049 * and TX reclaim runs via tp->napi.poll inside of a software
6050 * interrupt. Furthermore, IRQ processing runs lockless so we have
6051 * no IRQ context deadlocks to worry about either. Rejoice!
6053 if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) {
6054 if (!netif_tx_queue_stopped(txq)) {
6055 netif_tx_stop_queue(txq);
6057 /* This is a hard error, log it. */
6059 "BUG! Tx Ring full when queue awake!\n");
6061 return NETDEV_TX_BUSY;
6064 entry = tnapi->tx_prod;
6066 if (skb->ip_summed == CHECKSUM_PARTIAL)
6067 base_flags |= TXD_FLAG_TCPUDP_CSUM;
6069 mss = skb_shinfo(skb)->gso_size;
6072 u32 tcp_opt_len, hdr_len;
6074 if (skb_header_cloned(skb) &&
6075 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
6081 tcp_opt_len = tcp_optlen(skb);
6083 if (skb_is_gso_v6(skb)) {
6084 hdr_len = skb_headlen(skb) - ETH_HLEN;
6088 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
6089 hdr_len = ip_tcp_len + tcp_opt_len;
6092 iph->tot_len = htons(mss + hdr_len);
6095 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
6096 (tp->tg3_flags2 & TG3_FLG2_TSO_BUG))
6097 return tg3_tso_bug(tp, skb);
6099 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
6100 TXD_FLAG_CPU_POST_DMA);
6102 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
6103 tcp_hdr(skb)->check = 0;
6104 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
6106 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
6111 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) {
6112 mss |= (hdr_len & 0xc) << 12;
6114 base_flags |= 0x00000010;
6115 base_flags |= (hdr_len & 0x3e0) << 5;
6116 } else if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2)
6117 mss |= hdr_len << 9;
6118 else if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_1) ||
6119 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6120 if (tcp_opt_len || iph->ihl > 5) {
6123 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
6124 mss |= (tsflags << 11);
6127 if (tcp_opt_len || iph->ihl > 5) {
6130 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
6131 base_flags |= tsflags << 12;
6136 if (vlan_tx_tag_present(skb))
6137 base_flags |= (TXD_FLAG_VLAN |
6138 (vlan_tx_tag_get(skb) << 16));
6140 if ((tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) &&
6141 !mss && skb->len > VLAN_ETH_FRAME_LEN)
6142 base_flags |= TXD_FLAG_JMB_PKT;
6144 len = skb_headlen(skb);
6146 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
6147 if (pci_dma_mapping_error(tp->pdev, mapping)) {
6152 tnapi->tx_buffers[entry].skb = skb;
6153 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
6155 would_hit_hwbug = 0;
6157 if ((tp->tg3_flags3 & TG3_FLG3_SHORT_DMA_BUG) && len <= 8)
6158 would_hit_hwbug = 1;
6160 if ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) &&
6161 tg3_4g_overflow_test(mapping, len))
6162 would_hit_hwbug = 1;
6164 if ((tp->tg3_flags3 & TG3_FLG3_40BIT_DMA_LIMIT_BUG) &&
6165 tg3_40bit_overflow_test(tp, mapping, len))
6166 would_hit_hwbug = 1;
6168 if (tp->tg3_flags3 & TG3_FLG3_5701_DMA_BUG)
6169 would_hit_hwbug = 1;
6171 tg3_set_txd(tnapi, entry, mapping, len, base_flags,
6172 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
6174 entry = NEXT_TX(entry);
6176 /* Now loop through additional data fragments, and queue them. */
6177 if (skb_shinfo(skb)->nr_frags > 0) {
6178 last = skb_shinfo(skb)->nr_frags - 1;
6179 for (i = 0; i <= last; i++) {
6180 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6183 mapping = pci_map_page(tp->pdev,
6186 len, PCI_DMA_TODEVICE);
6188 tnapi->tx_buffers[entry].skb = NULL;
6189 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
6191 if (pci_dma_mapping_error(tp->pdev, mapping))
6194 if ((tp->tg3_flags3 & TG3_FLG3_SHORT_DMA_BUG) &&
6196 would_hit_hwbug = 1;
6198 if ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) &&
6199 tg3_4g_overflow_test(mapping, len))
6200 would_hit_hwbug = 1;
6202 if ((tp->tg3_flags3 & TG3_FLG3_40BIT_DMA_LIMIT_BUG) &&
6203 tg3_40bit_overflow_test(tp, mapping, len))
6204 would_hit_hwbug = 1;
6206 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6207 tg3_set_txd(tnapi, entry, mapping, len,
6208 base_flags, (i == last)|(mss << 1));
6210 tg3_set_txd(tnapi, entry, mapping, len,
6211 base_flags, (i == last));
6213 entry = NEXT_TX(entry);
6217 if (would_hit_hwbug) {
6218 u32 last_plus_one = entry;
6221 start = entry - 1 - skb_shinfo(skb)->nr_frags;
6222 start &= (TG3_TX_RING_SIZE - 1);
6224 /* If the workaround fails due to memory/mapping
6225 * failure, silently drop this packet.
6227 if (tigon3_dma_hwbug_workaround(tnapi, skb, last_plus_one,
6228 &start, base_flags, mss))
6234 /* Packets are ready, update Tx producer idx local and on card. */
6235 tw32_tx_mbox(tnapi->prodmbox, entry);
6237 tnapi->tx_prod = entry;
6238 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
6239 netif_tx_stop_queue(txq);
6241 /* netif_tx_stop_queue() must be done before checking
6242 * tx index in tg3_tx_avail() below, because in
6243 * tg3_tx(), we update tx index before checking for
6244 * netif_tx_queue_stopped().
6247 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
6248 netif_tx_wake_queue(txq);
6254 return NETDEV_TX_OK;
6258 entry = tnapi->tx_prod;
6259 tnapi->tx_buffers[entry].skb = NULL;
6260 pci_unmap_single(tp->pdev,
6261 dma_unmap_addr(&tnapi->tx_buffers[entry], mapping),
6264 for (i = 0; i <= last; i++) {
6265 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6266 entry = NEXT_TX(entry);
6268 pci_unmap_page(tp->pdev,
6269 dma_unmap_addr(&tnapi->tx_buffers[entry],
6271 frag->size, PCI_DMA_TODEVICE);
6275 return NETDEV_TX_OK;
6278 static u32 tg3_fix_features(struct net_device *dev, u32 features)
6280 struct tg3 *tp = netdev_priv(dev);
6282 if (dev->mtu > ETH_DATA_LEN && (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
6283 features &= ~NETIF_F_ALL_TSO;
6288 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
6293 if (new_mtu > ETH_DATA_LEN) {
6294 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
6295 netdev_update_features(dev);
6296 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
6298 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
6301 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
6302 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
6303 netdev_update_features(dev);
6305 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
6309 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
6311 struct tg3 *tp = netdev_priv(dev);
6314 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
6317 if (!netif_running(dev)) {
6318 /* We'll just catch it later when the
6321 tg3_set_mtu(dev, tp, new_mtu);
6329 tg3_full_lock(tp, 1);
6331 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6333 tg3_set_mtu(dev, tp, new_mtu);
6335 err = tg3_restart_hw(tp, 0);
6338 tg3_netif_start(tp);
6340 tg3_full_unlock(tp);
6348 static void tg3_rx_prodring_free(struct tg3 *tp,
6349 struct tg3_rx_prodring_set *tpr)
6353 if (tpr != &tp->napi[0].prodring) {
6354 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
6355 i = (i + 1) & tp->rx_std_ring_mask)
6356 tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
6359 if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
6360 for (i = tpr->rx_jmb_cons_idx;
6361 i != tpr->rx_jmb_prod_idx;
6362 i = (i + 1) & tp->rx_jmb_ring_mask) {
6363 tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
6371 for (i = 0; i <= tp->rx_std_ring_mask; i++)
6372 tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
6375 if ((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) &&
6376 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
6377 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
6378 tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
6383 /* Initialize rx rings for packet processing.
6385 * The chip has been shut down and the driver detached from
6386 * the networking, so no interrupts or new tx packets will
6387 * end up in the driver. tp->{tx,}lock are held and thus
6390 static int tg3_rx_prodring_alloc(struct tg3 *tp,
6391 struct tg3_rx_prodring_set *tpr)
6393 u32 i, rx_pkt_dma_sz;
6395 tpr->rx_std_cons_idx = 0;
6396 tpr->rx_std_prod_idx = 0;
6397 tpr->rx_jmb_cons_idx = 0;
6398 tpr->rx_jmb_prod_idx = 0;
6400 if (tpr != &tp->napi[0].prodring) {
6401 memset(&tpr->rx_std_buffers[0], 0,
6402 TG3_RX_STD_BUFF_RING_SIZE(tp));
6403 if (tpr->rx_jmb_buffers)
6404 memset(&tpr->rx_jmb_buffers[0], 0,
6405 TG3_RX_JMB_BUFF_RING_SIZE(tp));
6409 /* Zero out all descriptors. */
6410 memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
6412 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
6413 if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
6414 tp->dev->mtu > ETH_DATA_LEN)
6415 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
6416 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
6418 /* Initialize invariants of the rings, we only set this
6419 * stuff once. This works because the card does not
6420 * write into the rx buffer posting rings.
6422 for (i = 0; i <= tp->rx_std_ring_mask; i++) {
6423 struct tg3_rx_buffer_desc *rxd;
6425 rxd = &tpr->rx_std[i];
6426 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
6427 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
6428 rxd->opaque = (RXD_OPAQUE_RING_STD |
6429 (i << RXD_OPAQUE_INDEX_SHIFT));
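/* For example, entry 5 gets the cookie RXD_OPAQUE_RING_STD |
 * (5 << RXD_OPAQUE_INDEX_SHIFT); when the chip echoes it back in the
 * return ring, tg3_rx() masks out the ring type and index to find the
 * matching ring_info without extra bookkeeping.
 */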
6432 /* Now allocate fresh SKBs for each rx ring. */
6433 for (i = 0; i < tp->rx_pending; i++) {
6434 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
6435 netdev_warn(tp->dev,
6436 "Using a smaller RX standard ring. Only "
6437 "%d out of %d buffers were allocated "
6438 "successfully\n", i, tp->rx_pending);
6446 if (!(tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) ||
6447 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
6450 memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
6452 if (!(tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE))
6455 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
6456 struct tg3_rx_buffer_desc *rxd;
6458 rxd = &tpr->rx_jmb[i].std;
6459 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
6460 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
6462 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
6463 (i << RXD_OPAQUE_INDEX_SHIFT));
6466 for (i = 0; i < tp->rx_jumbo_pending; i++) {
6467 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
6468 netdev_warn(tp->dev,
6469 "Using a smaller RX jumbo ring. Only %d "
6470 "out of %d buffers were allocated "
6471 "successfully\n", i, tp->rx_jumbo_pending);
6474 tp->rx_jumbo_pending = i;
6483 tg3_rx_prodring_free(tp, tpr);
6487 static void tg3_rx_prodring_fini(struct tg3 *tp,
6488 struct tg3_rx_prodring_set *tpr)
6490 kfree(tpr->rx_std_buffers);
6491 tpr->rx_std_buffers = NULL;
6492 kfree(tpr->rx_jmb_buffers);
6493 tpr->rx_jmb_buffers = NULL;
6495 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
6496 tpr->rx_std, tpr->rx_std_mapping);
6500 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
6501 tpr->rx_jmb, tpr->rx_jmb_mapping);
6506 static int tg3_rx_prodring_init(struct tg3 *tp,
6507 struct tg3_rx_prodring_set *tpr)
6509 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
6511 if (!tpr->rx_std_buffers)
6514 tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
6515 TG3_RX_STD_RING_BYTES(tp),
6516 &tpr->rx_std_mapping,
6521 if ((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) &&
6522 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
6523 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
6525 if (!tpr->rx_jmb_buffers)
6528 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
6529 TG3_RX_JMB_RING_BYTES(tp),
6530 &tpr->rx_jmb_mapping,
6539 tg3_rx_prodring_fini(tp, tpr);
6543 /* Free up pending packets in all rx/tx rings.
6545 * The chip has been shut down and the driver detached from
6546 * the networking, so no interrupts or new tx packets will
6547 * end up in the driver. tp->{tx,}lock is not held and we are not
6548 * in an interrupt context and thus may sleep.
6550 static void tg3_free_rings(struct tg3 *tp)
6554 for (j = 0; j < tp->irq_cnt; j++) {
6555 struct tg3_napi *tnapi = &tp->napi[j];
6557 tg3_rx_prodring_free(tp, &tnapi->prodring);
6559 if (!tnapi->tx_buffers)
6562 for (i = 0; i < TG3_TX_RING_SIZE; ) {
6563 struct ring_info *txp;
6564 struct sk_buff *skb;
6567 txp = &tnapi->tx_buffers[i];
6575 pci_unmap_single(tp->pdev,
6576 dma_unmap_addr(txp, mapping),
6583 for (k = 0; k < skb_shinfo(skb)->nr_frags; k++) {
6584 txp = &tnapi->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
6585 pci_unmap_page(tp->pdev,
6586 dma_unmap_addr(txp, mapping),
6587 skb_shinfo(skb)->frags[k].size,
6592 dev_kfree_skb_any(skb);
6597 /* Initialize tx/rx rings for packet processing.
6599 * The chip has been shut down and the driver detached from
6600 * the networking, so no interrupts or new tx packets will
6601 * end up in the driver. tp->{tx,}lock are held and thus
6604 static int tg3_init_rings(struct tg3 *tp)
6608 /* Free up all the SKBs. */
6611 for (i = 0; i < tp->irq_cnt; i++) {
6612 struct tg3_napi *tnapi = &tp->napi[i];
6614 tnapi->last_tag = 0;
6615 tnapi->last_irq_tag = 0;
6616 tnapi->hw_status->status = 0;
6617 tnapi->hw_status->status_tag = 0;
6618 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6623 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
6625 tnapi->rx_rcb_ptr = 0;
6627 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
6629 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
6639 * Must not be invoked with interrupt sources disabled and
6640 * the hardware shut down.
6642 static void tg3_free_consistent(struct tg3 *tp)
6646 for (i = 0; i < tp->irq_cnt; i++) {
6647 struct tg3_napi *tnapi = &tp->napi[i];
6649 if (tnapi->tx_ring) {
6650 dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
6651 tnapi->tx_ring, tnapi->tx_desc_mapping);
6652 tnapi->tx_ring = NULL;
6655 kfree(tnapi->tx_buffers);
6656 tnapi->tx_buffers = NULL;
6658 if (tnapi->rx_rcb) {
6659 dma_free_coherent(&tp->pdev->dev,
6660 TG3_RX_RCB_RING_BYTES(tp),
6662 tnapi->rx_rcb_mapping);
6663 tnapi->rx_rcb = NULL;
6666 tg3_rx_prodring_fini(tp, &tnapi->prodring);
6668 if (tnapi->hw_status) {
6669 dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
6671 tnapi->status_mapping);
6672 tnapi->hw_status = NULL;
6677 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
6678 tp->hw_stats, tp->stats_mapping);
6679 tp->hw_stats = NULL;
6684 * Must not be invoked with interrupt sources disabled and
6685 * the hardware shut down. Can sleep.
6687 static int tg3_alloc_consistent(struct tg3 *tp)
6691 tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
6692 sizeof(struct tg3_hw_stats),
6698 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
6700 for (i = 0; i < tp->irq_cnt; i++) {
6701 struct tg3_napi *tnapi = &tp->napi[i];
6702 struct tg3_hw_status *sblk;
6704 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
6706 &tnapi->status_mapping,
6708 if (!tnapi->hw_status)
6711 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6712 sblk = tnapi->hw_status;
6714 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
6717 /* If multivector TSS is enabled, vector 0 does not handle
6718 * tx interrupts. Don't allocate any resources for it.
6720 if ((!i && !(tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)) ||
6721 (i && (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS))) {
6722 tnapi->tx_buffers = kzalloc(sizeof(struct ring_info) *
6725 if (!tnapi->tx_buffers)
6728 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
6730 &tnapi->tx_desc_mapping,
6732 if (!tnapi->tx_ring)
6737 * When RSS is enabled, the status block format changes
6738 * slightly. The "rx_jumbo_consumer", "reserved",
6739 * and "rx_mini_consumer" members get mapped to the
6740 * other three rx return ring producer indexes.
6744 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
6747 tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
6750 tnapi->rx_rcb_prod_idx = &sblk->reserved;
6753 tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
6758 * If multivector RSS is enabled, vector 0 does not handle
6759 * rx or tx interrupts. Don't allocate any resources for it.
6761 if (!i && (tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS))
6764 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
6765 TG3_RX_RCB_RING_BYTES(tp),
6766 &tnapi->rx_rcb_mapping,
6771 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
6777 tg3_free_consistent(tp);
6781 #define MAX_WAIT_CNT 1000
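/* Assuming the 100 microsecond poll interval used in tg3_stop_block()
 * below, this bounds each stop attempt at roughly 100 ms before the
 * timeout message is printed.
 */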
6783 /* To stop a block, clear the enable bit and poll till it
6784 * clears. tp->lock is held.
6786 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
6791 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
6798 /* We can't enable/disable these bits of the
6799 * 5705/5750, just say success.
6812 for (i = 0; i < MAX_WAIT_CNT; i++) {
6813 udelay(100);
6814 val = tr32(ofs);
6815 if ((val & enable_bit) == 0)
6816 break;
6819 if (i == MAX_WAIT_CNT && !silent) {
6820 dev_err(&tp->pdev->dev,
6821 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
6829 /* tp->lock is held. */
6830 static int tg3_abort_hw(struct tg3 *tp, int silent)
6834 tg3_disable_ints(tp);
6836 tp->rx_mode &= ~RX_MODE_ENABLE;
6837 tw32_f(MAC_RX_MODE, tp->rx_mode);
6840 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
6841 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
6842 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
6843 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
6844 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
6845 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
6847 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
6848 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
6849 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
6850 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
6851 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
6852 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
6853 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
6855 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
6856 tw32_f(MAC_MODE, tp->mac_mode);
6859 tp->tx_mode &= ~TX_MODE_ENABLE;
6860 tw32_f(MAC_TX_MODE, tp->tx_mode);
6862 for (i = 0; i < MAX_WAIT_CNT; i++) {
6864 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
6867 if (i >= MAX_WAIT_CNT) {
6868 dev_err(&tp->pdev->dev,
6869 "%s timed out, TX_MODE_ENABLE will not clear "
6870 "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
6874 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
6875 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
6876 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
6878 tw32(FTQ_RESET, 0xffffffff);
6879 tw32(FTQ_RESET, 0x00000000);
6881 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
6882 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
6884 for (i = 0; i < tp->irq_cnt; i++) {
6885 struct tg3_napi *tnapi = &tp->napi[i];
6886 if (tnapi->hw_status)
6887 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6890 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
6895 static void tg3_ape_send_event(struct tg3 *tp, u32 event)
6900 /* NCSI does not support APE events */
6901 if (tp->tg3_flags3 & TG3_FLG3_APE_HAS_NCSI)
6904 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
6905 if (apedata != APE_SEG_SIG_MAGIC)
6908 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
6909 if (!(apedata & APE_FW_STATUS_READY))
6912 /* Wait for up to 1 millisecond for APE to service previous event. */
6913 for (i = 0; i < 10; i++) {
6914 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
6917 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
6919 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6920 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
6921 event | APE_EVENT_STATUS_EVENT_PENDING);
6923 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
6925 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6931 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6932 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
6935 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
6940 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
6944 case RESET_KIND_INIT:
6945 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
6946 APE_HOST_SEG_SIG_MAGIC);
6947 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
6948 APE_HOST_SEG_LEN_MAGIC);
6949 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
6950 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
6951 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
6952 APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
6953 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
6954 APE_HOST_BEHAV_NO_PHYLOCK);
6955 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
6956 TG3_APE_HOST_DRVR_STATE_START);
6958 event = APE_EVENT_STATUS_STATE_START;
6960 case RESET_KIND_SHUTDOWN:
6961 /* With the interface we are currently using,
6962 * APE does not track driver state. Wiping
6963 * out the HOST SEGMENT SIGNATURE forces
6964 * the APE to assume OS absent status.
6966 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
6968 if (device_may_wakeup(&tp->pdev->dev) &&
6969 (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)) {
6970 tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
6971 TG3_APE_HOST_WOL_SPEED_AUTO);
6972 apedata = TG3_APE_HOST_DRVR_STATE_WOL;
6974 apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;
6976 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);
6978 event = APE_EVENT_STATUS_STATE_UNLOAD;
6980 case RESET_KIND_SUSPEND:
6981 event = APE_EVENT_STATUS_STATE_SUSPEND;
6987 event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
6989 tg3_ape_send_event(tp, event);
6992 /* tp->lock is held. */
6993 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
6995 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
6996 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
6998 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
7000 case RESET_KIND_INIT:
7001 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7005 case RESET_KIND_SHUTDOWN:
7006 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7010 case RESET_KIND_SUSPEND:
7011 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7020 if (kind == RESET_KIND_INIT ||
7021 kind == RESET_KIND_SUSPEND)
7022 tg3_ape_driver_state_change(tp, kind);
7025 /* tp->lock is held. */
7026 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
7028 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
7030 case RESET_KIND_INIT:
7031 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7032 DRV_STATE_START_DONE);
7035 case RESET_KIND_SHUTDOWN:
7036 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7037 DRV_STATE_UNLOAD_DONE);
7045 if (kind == RESET_KIND_SHUTDOWN)
7046 tg3_ape_driver_state_change(tp, kind);
7049 /* tp->lock is held. */
7050 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
7052 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
7054 case RESET_KIND_INIT:
7055 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7059 case RESET_KIND_SHUTDOWN:
7060 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7064 case RESET_KIND_SUSPEND:
7065 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7075 static int tg3_poll_fw(struct tg3 *tp)
7080 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7081 /* Wait up to 20ms for init done. */
7082 for (i = 0; i < 200; i++) {
7083 if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
7090 /* Wait for firmware initialization to complete. */
7091 for (i = 0; i < 100000; i++) {
7092 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
7093 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
7098 /* Chip might not be fitted with firmware. Some Sun onboard
7099 * parts are configured like that. So don't signal the timeout
7100 * of the above loop as an error, but do report the lack of
7101 * running firmware once.
7104 !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
7105 tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;
7107 netdev_info(tp->dev, "No firmware running\n");
7110 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
7111 /* The 57765 A0 needs a little more
7112 * time to do some important work.
7120 /* Save PCI command register before chip reset */
7121 static void tg3_save_pci_state(struct tg3 *tp)
7123 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
7126 /* Restore PCI state after chip reset */
7127 static void tg3_restore_pci_state(struct tg3 *tp)
7131 /* Re-enable indirect register accesses. */
7132 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7133 tp->misc_host_ctrl);
7135 /* Set MAX PCI retry to zero. */
7136 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
7137 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7138 (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
7139 val |= PCISTATE_RETRY_SAME_DMA;
7140 /* Allow reads and writes to the APE register and memory space. */
7141 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
7142 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7143 PCISTATE_ALLOW_APE_SHMEM_WR |
7144 PCISTATE_ALLOW_APE_PSPACE_WR;
7145 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
7147 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
7149 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
7150 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
7151 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
7153 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
7154 tp->pci_cacheline_sz);
7155 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
7160 /* Make sure PCI-X relaxed ordering bit is clear. */
7161 if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
7164 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7166 pcix_cmd &= ~PCI_X_CMD_ERO;
7167 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7171 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
7173 /* Chip reset on 5780 will reset MSI enable bit,
7174 * so need to restore it.
7176 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7179 pci_read_config_word(tp->pdev,
7180 tp->msi_cap + PCI_MSI_FLAGS,
7182 pci_write_config_word(tp->pdev,
7183 tp->msi_cap + PCI_MSI_FLAGS,
7184 ctrl | PCI_MSI_FLAGS_ENABLE);
7185 val = tr32(MSGINT_MODE);
7186 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
7191 static void tg3_stop_fw(struct tg3 *);
7193 /* tp->lock is held. */
7194 static int tg3_chip_reset(struct tg3 *tp)
7197 void (*write_op)(struct tg3 *, u32, u32);
7202 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
7204 /* No matching tg3_nvram_unlock() after this because
7205 * chip reset below will undo the nvram lock.
7207 tp->nvram_lock_cnt = 0;
7209 /* GRC_MISC_CFG core clock reset will clear the memory
7210 * enable bit in PCI register 4 and the MSI enable bit
7211 * on some chips, so we save relevant registers here.
7213 tg3_save_pci_state(tp);
7215 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
7216 (tp->tg3_flags3 & TG3_FLG3_5755_PLUS))
7217 tw32(GRC_FASTBOOT_PC, 0);
7220 * We must avoid the readl() that normally takes place.
7221 * It locks machines, causes machine checks, and other
7222 * fun things. So, temporarily disable the 5701
7223 * hardware workaround, while we do the reset.
7225 write_op = tp->write32;
7226 if (write_op == tg3_write_flush_reg32)
7227 tp->write32 = tg3_write32;
7229 /* Prevent the irq handler from reading or writing PCI registers
7230 * during chip reset when the memory enable bit in the PCI command
7231 * register may be cleared. The chip does not generate interrupt
7232 * at this time, but the irq handler may still be called due to irq
7233 * sharing or irqpoll.
7235 tp->tg3_flags |= TG3_FLAG_CHIP_RESETTING;
7236 for (i = 0; i < tp->irq_cnt; i++) {
7237 struct tg3_napi *tnapi = &tp->napi[i];
7238 if (tnapi->hw_status) {
7239 tnapi->hw_status->status = 0;
7240 tnapi->hw_status->status_tag = 0;
7242 tnapi->last_tag = 0;
7243 tnapi->last_irq_tag = 0;
7247 for (i = 0; i < tp->irq_cnt; i++)
7248 synchronize_irq(tp->napi[i].irq_vec);
7250 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7251 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
7252 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
7256 val = GRC_MISC_CFG_CORECLK_RESET;
7258 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
7259 /* Force PCIe 1.0a mode */
7260 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7261 !(tp->tg3_flags3 & TG3_FLG3_57765_PLUS) &&
7262 tr32(TG3_PCIE_PHY_TSTCTL) ==
7263 (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
7264 tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
7266 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
7267 tw32(GRC_MISC_CFG, (1 << 29));
7272 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7273 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
7274 tw32(GRC_VCPU_EXT_CTRL,
7275 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
7278 /* Manage gphy power for all CPMU absent PCIe devices. */
7279 if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
7280 !(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT))
7281 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
7283 tw32(GRC_MISC_CFG, val);
7285 /* restore 5701 hardware bug workaround write method */
7286 tp->write32 = write_op;
7288 /* Unfortunately, we have to delay before the PCI read back.
7289 * Some 575X chips even will not respond to a PCI cfg access
7290 * when the reset command is given to the chip.
7292 * How do these hardware designers expect things to work
7293 * properly if the PCI write is posted for a long period
7294 * of time? It is always necessary to have some method by
7295 * which a register read back can occur to push the write
7296 * out which does the reset.
7298 * For most tg3 variants the trick below has worked.
7303 /* Flush PCI posted writes. The normal MMIO registers
7304 * are inaccessible at this time so this is the only
7305 * way to do this reliably (actually, this is no longer
7306 * the case, see above). I tried to use indirect
7307 * register read/write but this upset some 5701 variants.
7309 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
7313 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) && tp->pcie_cap) {
7316 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
7320 /* Wait for link training to complete. */
7321 for (i = 0; i < 5000; i++)
7324 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
7325 pci_write_config_dword(tp->pdev, 0xc4,
7326 cfg_val | (1 << 15));
7329 /* Clear the "no snoop" and "relaxed ordering" bits. */
7330 pci_read_config_word(tp->pdev,
7331 tp->pcie_cap + PCI_EXP_DEVCTL,
7333 val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
7334 PCI_EXP_DEVCTL_NOSNOOP_EN);
7336 * Older PCIe devices only support the 128 byte
7337 * MPS setting. Enforce the restriction.
7339 if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT))
7340 val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
7341 pci_write_config_word(tp->pdev,
7342 tp->pcie_cap + PCI_EXP_DEVCTL,
7345 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
7347 /* Clear error status */
7348 pci_write_config_word(tp->pdev,
7349 tp->pcie_cap + PCI_EXP_DEVSTA,
7350 PCI_EXP_DEVSTA_CED |
7351 PCI_EXP_DEVSTA_NFED |
7352 PCI_EXP_DEVSTA_FED |
7353 PCI_EXP_DEVSTA_URD);
7356 tg3_restore_pci_state(tp);
7358 tp->tg3_flags &= ~(TG3_FLAG_CHIP_RESETTING |
7359 TG3_FLAG_ERROR_PROCESSED);
7362 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
7363 val = tr32(MEMARB_MODE);
7364 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
7366 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
7368 tw32(0x5000, 0x400);
7371 tw32(GRC_MODE, tp->grc_mode);
7373 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
7376 tw32(0xc4, val | (1 << 15));
7379 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
7380 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7381 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
7382 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
7383 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
7384 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7387 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
7388 tp->mac_mode = MAC_MODE_APE_TX_EN |
7389 MAC_MODE_APE_RX_EN |
7390 MAC_MODE_TDE_ENABLE;
7392 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
7393 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
7395 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
7396 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
7401 tw32_f(MAC_MODE, val);
7404 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
7406 err = tg3_poll_fw(tp);
7412 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
7413 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
7414 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7415 !(tp->tg3_flags3 & TG3_FLG3_57765_PLUS)) {
7418 tw32(0x7c00, val | (1 << 25));
7421 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
7422 val = tr32(TG3_CPMU_CLCK_ORIDE);
7423 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
7426 /* Reprobe ASF enable state. */
7427 tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
7428 tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
7429 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
7430 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
7433 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
7434 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
7435 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
7436 tp->last_event_jiffies = jiffies;
7437 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
7438 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
7445 /* tp->lock is held. */
7446 static void tg3_stop_fw(struct tg3 *tp)
7448 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
7449 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
7450 /* Wait for RX cpu to ACK the previous event. */
7451 tg3_wait_for_event_ack(tp);
7453 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
7455 tg3_generate_fw_event(tp);
7457 /* Wait for RX cpu to ACK this event. */
7458 tg3_wait_for_event_ack(tp);
7462 /* tp->lock is held. */
7463 static int tg3_halt(struct tg3 *tp, int kind, int silent)
7469 tg3_write_sig_pre_reset(tp, kind);
7471 tg3_abort_hw(tp, silent);
7472 err = tg3_chip_reset(tp);
7474 __tg3_set_mac_addr(tp, 0);
7476 tg3_write_sig_legacy(tp, kind);
7477 tg3_write_sig_post_reset(tp, kind);
7485 #define RX_CPU_SCRATCH_BASE 0x30000
7486 #define RX_CPU_SCRATCH_SIZE 0x04000
7487 #define TX_CPU_SCRATCH_BASE 0x34000
7488 #define TX_CPU_SCRATCH_SIZE 0x04000
7490 /* tp->lock is held. */
7491 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
7495 BUG_ON(offset == TX_CPU_BASE &&
7496 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
7498 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7499 u32 val = tr32(GRC_VCPU_EXT_CTRL);
7501 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
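/* A single halt request may not stick; keep re-issuing it until
 * CPU_MODE reads back the halted state.
 */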
7504 if (offset == RX_CPU_BASE) {
7505 for (i = 0; i < 10000; i++) {
7506 tw32(offset + CPU_STATE, 0xffffffff);
7507 tw32(offset + CPU_MODE, CPU_MODE_HALT);
7508 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
7512 tw32(offset + CPU_STATE, 0xffffffff);
7513 tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
7516 for (i = 0; i < 10000; i++) {
7517 tw32(offset + CPU_STATE, 0xffffffff);
7518 tw32(offset + CPU_MODE, CPU_MODE_HALT);
7519 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
7525 netdev_err(tp->dev, "%s timed out, %s CPU\n",
7526 __func__, offset == RX_CPU_BASE ? "RX" : "TX");
7530 /* Clear firmware's nvram arbitration. */
7531 if (tp->tg3_flags & TG3_FLAG_NVRAM)
7532 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
7537 unsigned int fw_base;
7538 unsigned int fw_len;
7539 const __be32 *fw_data;
7542 /* tp->lock is held. */
7543 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
7544 int cpu_scratch_size, struct fw_info *info)
7546 int err, lock_err, i;
7547 void (*write_op)(struct tg3 *, u32, u32);
7549 if (cpu_base == TX_CPU_BASE &&
7550 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7552 "%s: Trying to load TX cpu firmware which is 5705\n",
7557 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
7558 write_op = tg3_write_mem;
7560 write_op = tg3_write_indirect_reg32;
7562 /* It is possible that bootcode is still loading at this point.
7563 * Get the nvram lock first before halting the cpu.
7565 lock_err = tg3_nvram_lock(tp);
7566 err = tg3_halt_cpu(tp, cpu_base);
7568 tg3_nvram_unlock(tp);
7572 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
7573 write_op(tp, cpu_scratch_base + i, 0);
7574 tw32(cpu_base + CPU_STATE, 0xffffffff);
7575 tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
7576 for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
7577 write_op(tp, (cpu_scratch_base +
7578 (info->fw_base & 0xffff) +
7580 be32_to_cpu(info->fw_data[i]));
7588 /* tp->lock is held. */
7589 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
7591 struct fw_info info;
7592 const __be32 *fw_data;
7595 fw_data = (void *)tp->fw->data;
7597 /* The firmware blob starts with three __be32 header words:
7598  * version, start address and length.  The length is the complete
7599  * image length, end_address_of_bss - start_address_of_text; the
7600  * remainder of the blob is loaded contiguously from the start
7601  * address. */
7603 info.fw_base = be32_to_cpu(fw_data[1]);
7604 info.fw_len = tp->fw->size - 12;
7605 info.fw_data = &fw_data[3];
7607 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
7608 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
7613 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
7614 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
7619 /* Now startup only the RX cpu. */
7620 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7621 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
7623 for (i = 0; i < 5; i++) {
7624 if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
7626 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7627 tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
7628 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
7632 netdev_err(tp->dev, "%s failed to set RX CPU PC: is %08x, "
7633 "should be %08x\n", __func__,
7634 tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
7637 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7638 tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
7643 /* 5705 needs a special version of the TSO firmware. */
7645 /* tp->lock is held. */
7646 static int tg3_load_tso_firmware(struct tg3 *tp)
7648 struct fw_info info;
7649 const __be32 *fw_data;
7650 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
7653 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
7656 fw_data = (void *)tp->fw->data;
7658 /* The firmware blob starts with three __be32 header words:
7659  * version, start address and length.  The length is the complete
7660  * image length, end_address_of_bss - start_address_of_text; the
7661  * remainder of the blob is loaded contiguously from the start
7662  * address. */
7664 info.fw_base = be32_to_cpu(fw_data[1]);
7665 cpu_scratch_size = tp->fw_len;
7666 info.fw_len = tp->fw->size - 12;
7667 info.fw_data = &fw_data[3];
7669 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7670 cpu_base = RX_CPU_BASE;
7671 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
7673 cpu_base = TX_CPU_BASE;
7674 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
7675 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
7678 err = tg3_load_firmware_cpu(tp, cpu_base,
7679 cpu_scratch_base, cpu_scratch_size,
7684 /* Now startup the cpu. */
7685 tw32(cpu_base + CPU_STATE, 0xffffffff);
7686 tw32_f(cpu_base + CPU_PC, info.fw_base);
7688 for (i = 0; i < 5; i++) {
7689 if (tr32(cpu_base + CPU_PC) == info.fw_base)
7691 tw32(cpu_base + CPU_STATE, 0xffffffff);
7692 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
7693 tw32_f(cpu_base + CPU_PC, info.fw_base);
7698 "%s fails to set CPU PC, is %08x should be %08x\n",
7699 __func__, tr32(cpu_base + CPU_PC), info.fw_base);
7702 tw32(cpu_base + CPU_STATE, 0xffffffff);
7703 tw32_f(cpu_base + CPU_MODE, 0x00000000);
7708 static int tg3_set_mac_addr(struct net_device *dev, void *p)
7710 struct tg3 *tp = netdev_priv(dev);
7711 struct sockaddr *addr = p;
7712 int err = 0, skip_mac_1 = 0;
7714 if (!is_valid_ether_addr(addr->sa_data))
7717 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7719 if (!netif_running(dev))
7722 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
7723 u32 addr0_high, addr0_low, addr1_high, addr1_low;
7725 addr0_high = tr32(MAC_ADDR_0_HIGH);
7726 addr0_low = tr32(MAC_ADDR_0_LOW);
7727 addr1_high = tr32(MAC_ADDR_1_HIGH);
7728 addr1_low = tr32(MAC_ADDR_1_LOW);
7730 /* Skip MAC addr 1 if ASF is using it. */
7731 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
7732 !(addr1_high == 0 && addr1_low == 0))
7735 spin_lock_bh(&tp->lock);
7736 __tg3_set_mac_addr(tp, skip_mac_1);
7737 spin_unlock_bh(&tp->lock);
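/* Program one TG3_BDINFO ring control block: the 64-bit host address
 * of the ring, the (max length << 16) | flags word and, on pre-5705
 * parts, the ring's address in NIC SRAM.
 */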
7742 /* tp->lock is held. */
7743 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
7744 dma_addr_t mapping, u32 maxlen_flags,
7748 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
7749 ((u64) mapping >> 32));
7751 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
7752 ((u64) mapping & 0xffffffff));
7754 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
7757 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7759 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
7763 static void __tg3_set_rx_mode(struct net_device *);
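/* Apply the ethtool coalescing parameters to the host coalescing
 * engine: vector 0 first, then the extra MSI-X vectors.
 */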
7764 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
7768 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)) {
7769 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
7770 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
7771 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
7773 tw32(HOSTCC_TXCOL_TICKS, 0);
7774 tw32(HOSTCC_TXMAX_FRAMES, 0);
7775 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
7778 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS)) {
7779 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
7780 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
7781 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
7783 tw32(HOSTCC_RXCOL_TICKS, 0);
7784 tw32(HOSTCC_RXMAX_FRAMES, 0);
7785 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
7788 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7789 u32 val = ec->stats_block_coalesce_usecs;
7791 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
7792 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
7794 if (!netif_carrier_ok(tp->dev))
7797 tw32(HOSTCC_STAT_COAL_TICKS, val);
7800 for (i = 0; i < tp->irq_cnt - 1; i++) {
7803 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
7804 tw32(reg, ec->rx_coalesce_usecs);
7805 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
7806 tw32(reg, ec->rx_max_coalesced_frames);
7807 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
7808 tw32(reg, ec->rx_max_coalesced_frames_irq);
7810 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS) {
7811 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
7812 tw32(reg, ec->tx_coalesce_usecs);
7813 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
7814 tw32(reg, ec->tx_max_coalesced_frames);
7815 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
7816 tw32(reg, ec->tx_max_coalesced_frames_irq);
7820 for (; i < tp->irq_max - 1; i++) {
7821 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
7822 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
7823 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
7825 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS) {
7826 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
7827 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
7828 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
7833 /* tp->lock is held. */
7834 static void tg3_rings_reset(struct tg3 *tp)
7837 u32 stblk, txrcb, rxrcb, limit;
7838 struct tg3_napi *tnapi = &tp->napi[0];
7840 /* Disable all transmit rings but the first. */
7841 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7842 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
7843 else if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)
7844 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
7845 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
7846 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
7848 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
7850 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
7851 txrcb < limit; txrcb += TG3_BDINFO_SIZE)
7852 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
7853 BDINFO_FLAGS_DISABLED);
7856 /* Disable all receive return rings but the first. */
7857 if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)
7858 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
7859 else if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7860 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
7861 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
7862 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
7863 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
7865 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
7867 for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
7868 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
7869 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
7870 BDINFO_FLAGS_DISABLED);
7872 /* Disable interrupts */
7873 tw32_mailbox_f(tp->napi[0].int_mbox, 1);
7875 /* Zero mailbox registers. */
7876 if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX) {
7877 for (i = 1; i < tp->irq_max; i++) {
7878 tp->napi[i].tx_prod = 0;
7879 tp->napi[i].tx_cons = 0;
7880 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
7881 tw32_mailbox(tp->napi[i].prodmbox, 0);
7882 tw32_rx_mbox(tp->napi[i].consmbox, 0);
7883 tw32_mailbox_f(tp->napi[i].int_mbox, 1);
7885 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS))
7886 tw32_mailbox(tp->napi[0].prodmbox, 0);
7888 tp->napi[0].tx_prod = 0;
7889 tp->napi[0].tx_cons = 0;
7890 tw32_mailbox(tp->napi[0].prodmbox, 0);
7891 tw32_rx_mbox(tp->napi[0].consmbox, 0);
7894 /* Make sure the NIC-based send BD rings are disabled. */
7895 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7896 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
7897 for (i = 0; i < 16; i++)
7898 tw32_tx_mbox(mbox + i * 8, 0);
7901 txrcb = NIC_SRAM_SEND_RCB;
7902 rxrcb = NIC_SRAM_RCV_RET_RCB;
7904 /* Clear status block in ram. */
7905 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7907 /* Set status block DMA address */
7908 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7909 ((u64) tnapi->status_mapping >> 32));
7910 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7911 ((u64) tnapi->status_mapping & 0xffffffff));
7913 if (tnapi->tx_ring) {
7914 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
7915 (TG3_TX_RING_SIZE <<
7916 BDINFO_FLAGS_MAXLEN_SHIFT),
7917 NIC_SRAM_TX_BUFFER_DESC);
7918 txrcb += TG3_BDINFO_SIZE;
7921 if (tnapi->rx_rcb) {
7922 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
7923 (tp->rx_ret_ring_mask + 1) <<
7924 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
7925 rxrcb += TG3_BDINFO_SIZE;
7928 stblk = HOSTCC_STATBLCK_RING1;
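/* Repeat the status block and ring control block setup for the
 * remaining interrupt vectors.
 */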
7930 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
7931 u64 mapping = (u64)tnapi->status_mapping;
7932 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
7933 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
7935 /* Clear status block in ram. */
7936 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7938 if (tnapi->tx_ring) {
7939 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
7940 (TG3_TX_RING_SIZE <<
7941 BDINFO_FLAGS_MAXLEN_SHIFT),
7942 NIC_SRAM_TX_BUFFER_DESC);
7943 txrcb += TG3_BDINFO_SIZE;
7946 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
7947 ((tp->rx_ret_ring_mask + 1) <<
7948 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
7951 rxrcb += TG3_BDINFO_SIZE;
7955 /* tp->lock is held. */
7956 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7958 u32 val, rdmac_mode;
7960 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
7962 tg3_disable_ints(tp);
7966 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
7968 if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)
7969 tg3_abort_hw(tp, 1);
7971 /* Enable MAC control of LPI */
7972 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
7973 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
7974 TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
7975 TG3_CPMU_EEE_LNKIDL_UART_IDL);
7977 tw32_f(TG3_CPMU_EEE_CTRL,
7978 TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
7980 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
7981 TG3_CPMU_EEEMD_LPI_IN_TX |
7982 TG3_CPMU_EEEMD_LPI_IN_RX |
7983 TG3_CPMU_EEEMD_EEE_ENABLE;
7985 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
7986 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
7988 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
7989 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
7991 tw32_f(TG3_CPMU_EEE_MODE, val);
7993 tw32_f(TG3_CPMU_EEE_DBTMR1,
7994 TG3_CPMU_DBTMR1_PCIEXIT_2047US |
7995 TG3_CPMU_DBTMR1_LNKIDLE_2047US);
7997 tw32_f(TG3_CPMU_EEE_DBTMR2,
7998 TG3_CPMU_DBTMR2_APE_TX_2047US |
7999 TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
8005 err = tg3_chip_reset(tp);
8009 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
8011 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
8012 val = tr32(TG3_CPMU_CTRL);
8013 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
8014 tw32(TG3_CPMU_CTRL, val);
8016 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8017 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8018 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8019 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8021 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
8022 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
8023 val |= CPMU_LNK_AWARE_MACCLK_6_25;
8024 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
8026 val = tr32(TG3_CPMU_HST_ACC);
8027 val &= ~CPMU_HST_ACC_MACCLK_MASK;
8028 val |= CPMU_HST_ACC_MACCLK_6_25;
8029 tw32(TG3_CPMU_HST_ACC, val);
8032 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
8033 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
8034 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
8035 PCIE_PWR_MGMT_L1_THRESH_4MS;
8036 tw32(PCIE_PWR_MGMT_THRESH, val);
8038 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
8039 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
8041 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
8043 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8044 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8047 if (tp->tg3_flags3 & TG3_FLG3_L1PLLPD_EN) {
8048 u32 grc_mode = tr32(GRC_MODE);
8050 /* Access the lower 1K of PL PCIE block registers. */
8051 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8052 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8054 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
8055 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
8056 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
8058 tw32(GRC_MODE, grc_mode);
8061 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
8062 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
8063 u32 grc_mode = tr32(GRC_MODE);
8065 /* Access the lower 1K of PL PCIE block registers. */
8066 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8067 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8069 val = tr32(TG3_PCIE_TLDLPL_PORT +
8070 TG3_PCIE_PL_LO_PHYCTL5);
8071 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
8072 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
8074 tw32(GRC_MODE, grc_mode);
8077 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8078 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8079 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8080 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8083 /* This works around an issue with Athlon chipsets on
8084 * B3 tigon3 silicon. This bit has no effect on any
8085 * other revision. But do not set this on PCI Express
8086 * chips and don't even touch the clocks if the CPMU is present.
8088 if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)) {
8089 if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
8090 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
8091 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8094 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
8095 (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
8096 val = tr32(TG3PCI_PCISTATE);
8097 val |= PCISTATE_RETRY_SAME_DMA;
8098 tw32(TG3PCI_PCISTATE, val);
8101 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
8102 /* Allow reads and writes to the
8103 * APE register and memory space.
8105 val = tr32(TG3PCI_PCISTATE);
8106 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8107 PCISTATE_ALLOW_APE_SHMEM_WR |
8108 PCISTATE_ALLOW_APE_PSPACE_WR;
8109 tw32(TG3PCI_PCISTATE, val);
8112 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
8113 /* Enable some hw fixes. */
8114 val = tr32(TG3PCI_MSI_DATA);
8115 val |= (1 << 26) | (1 << 28) | (1 << 29);
8116 tw32(TG3PCI_MSI_DATA, val);
8119 /* Descriptor ring init may make accesses to the
8120 * NIC SRAM area to setup the TX descriptors, so we
8121 * can only do this after the hardware has been
8122 * successfully reset.
8124 err = tg3_init_rings(tp);
8128 if (tp->tg3_flags3 & TG3_FLG3_57765_PLUS) {
8129 val = tr32(TG3PCI_DMA_RW_CTRL) &
8130 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
8131 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
8132 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
8133 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
8134 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
8135 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
8136 /* This value is determined during the probe time DMA
8137 * engine test, tg3_test_dma.
8139 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8142 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
8143 GRC_MODE_4X_NIC_SEND_RINGS |
8144 GRC_MODE_NO_TX_PHDR_CSUM |
8145 GRC_MODE_NO_RX_PHDR_CSUM);
8146 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
8148 /* Pseudo-header checksum is done by hardware logic and not
8149 * the offload processors, so make the chip do the pseudo-
8150 * header checksums on receive. For transmit it is more
8151 * convenient to do the pseudo-header checksum in software
8152 * as Linux does that on transmit for us in all cases.
8154 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
8158 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
8160 /* Set up the timer prescaler register.  The clock is always 66 MHz; a prescaler of 65 divides it by 65 + 1, giving a 1 usec timer tick. */
8161 val = tr32(GRC_MISC_CFG);
8163 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
8164 tw32(GRC_MISC_CFG, val);
8166 /* Initialize MBUF/DESC pool. */
8167 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
8169 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
8170 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
8171 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8172 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
8174 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
8175 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
8176 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
8177 } else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
8180 fw_len = tp->fw_len;
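/* Round the firmware length up to the next 128-byte boundary. */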
8181 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
8182 tw32(BUFMGR_MB_POOL_ADDR,
8183 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
8184 tw32(BUFMGR_MB_POOL_SIZE,
8185 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
8188 if (tp->dev->mtu <= ETH_DATA_LEN) {
8189 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8190 tp->bufmgr_config.mbuf_read_dma_low_water);
8191 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8192 tp->bufmgr_config.mbuf_mac_rx_low_water);
8193 tw32(BUFMGR_MB_HIGH_WATER,
8194 tp->bufmgr_config.mbuf_high_water);
8196 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8197 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
8198 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8199 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
8200 tw32(BUFMGR_MB_HIGH_WATER,
8201 tp->bufmgr_config.mbuf_high_water_jumbo);
8203 tw32(BUFMGR_DMA_LOW_WATER,
8204 tp->bufmgr_config.dma_low_water);
8205 tw32(BUFMGR_DMA_HIGH_WATER,
8206 tp->bufmgr_config.dma_high_water);
8208 val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
8209 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
8210 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
8211 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8212 tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8213 tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
8214 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
8215 tw32(BUFMGR_MODE, val);
8216 for (i = 0; i < 2000; i++) {
8217 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
8222 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
8226 /* Setup replenish threshold. */
8227 val = tp->rx_pending / 8;
8230 else if (val > tp->rx_std_max_post)
8231 val = tp->rx_std_max_post;
8232 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
8233 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
8234 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
8236 if (val > (TG3_RX_INTERNAL_RING_SZ_5906 / 2))
8237 val = TG3_RX_INTERNAL_RING_SZ_5906 / 2;
8240 tw32(RCVBDI_STD_THRESH, val);
8242 /* Initialize TG3_BDINFO's at:
8243 * RCVDBDI_STD_BD: standard eth size rx ring
8244 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
8245 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
8248 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
8249 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
8250 * ring attribute flags
8251 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
8253 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
8254 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
8256 * The size of each ring is fixed in the firmware, but the location is configurable.
8259 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8260 ((u64) tpr->rx_std_mapping >> 32));
8261 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8262 ((u64) tpr->rx_std_mapping & 0xffffffff));
8263 if (!(tp->tg3_flags3 & TG3_FLG3_5717_PLUS))
8264 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
8265 NIC_SRAM_RX_BUFFER_DESC);
8267 /* Disable the mini ring */
8268 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
8269 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
8270 BDINFO_FLAGS_DISABLED);
8272 /* Program the jumbo buffer descriptor ring control
8273 * blocks on those devices that have them.
8275 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8276 ((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) &&
8277 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))) {
8278 /* Setup replenish threshold. */
8279 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
8281 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
8282 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8283 ((u64) tpr->rx_jmb_mapping >> 32));
8284 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8285 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
8286 val = TG3_RX_JMB_RING_SIZE(tp) <<
8287 BDINFO_FLAGS_MAXLEN_SHIFT;
8288 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8289 val | BDINFO_FLAGS_USE_EXT_RECV);
8290 if (!(tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) ||
8291 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8292 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
8293 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
8295 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8296 BDINFO_FLAGS_DISABLED);
8299 if (tp->tg3_flags3 & TG3_FLG3_57765_PLUS) {
8300 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8301 val = TG3_RX_STD_MAX_SIZE_5700;
8303 val = TG3_RX_STD_MAX_SIZE_5717;
8304 val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
8305 val |= (TG3_RX_STD_DMA_SZ << 2);
8307 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
8309 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
8311 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
8313 tpr->rx_std_prod_idx = tp->rx_pending;
8314 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
8316 tpr->rx_jmb_prod_idx = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
8317 tp->rx_jumbo_pending : 0;
8318 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
8320 if (tp->tg3_flags3 & TG3_FLG3_57765_PLUS) {
8321 tw32(STD_REPLENISH_LWM, 32);
8322 tw32(JMB_REPLENISH_LWM, 16);
8325 tg3_rings_reset(tp);
8327 /* Initialize MAC address and backoff seed. */
8328 __tg3_set_mac_addr(tp, 0);
8330 /* MTU + ethernet header + FCS + optional VLAN tag */
8331 tw32(MAC_RX_MTU_SIZE,
8332 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
8334 /* The slot time is changed by tg3_setup_phy if we
8335 * run at gigabit with half duplex.
8337 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
8338 (6 << TX_LENGTHS_IPG_SHIFT) |
8339 (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
8341 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8342 val |= tr32(MAC_TX_LENGTHS) &
8343 (TX_LENGTHS_JMB_FRM_LEN_MSK |
8344 TX_LENGTHS_CNT_DWN_VAL_MSK);
8346 tw32(MAC_TX_LENGTHS, val);
8348 /* Receive rules. */
8349 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
8350 tw32(RCVLPC_CONFIG, 0x0181);
8352 /* Calculate RDMAC_MODE setting early, we need it to determine
8353 * the RCVLPC_STATE_ENABLE mask.
8355 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
8356 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
8357 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
8358 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
8359 RDMAC_MODE_LNGREAD_ENAB);
8361 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
8362 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
8364 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8365 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8366 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8367 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
8368 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
8369 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
8371 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8372 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8373 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
8374 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
8375 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
8376 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8377 !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
8378 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8382 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
8383 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8385 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
8386 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
8388 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) ||
8389 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8390 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8391 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
8393 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8394 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
8396 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
8397 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8398 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8399 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
8400 (tp->tg3_flags3 & TG3_FLG3_57765_PLUS)) {
8401 val = tr32(TG3_RDMA_RSRVCTRL_REG);
8402 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8403 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8404 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
8405 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
8406 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
8407 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
8408 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
8409 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
8411 tw32(TG3_RDMA_RSRVCTRL_REG,
8412 val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
8415 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8416 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8417 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
8418 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
8419 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
8420 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
8423 /* Receive/send statistics. */
8424 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
8425 val = tr32(RCVLPC_STATS_ENABLE);
8426 val &= ~RCVLPC_STATSENAB_DACK_FIX;
8427 tw32(RCVLPC_STATS_ENABLE, val);
8428 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
8429 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
8430 val = tr32(RCVLPC_STATS_ENABLE);
8431 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
8432 tw32(RCVLPC_STATS_ENABLE, val);
8434 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
8436 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
8437 tw32(SNDDATAI_STATSENAB, 0xffffff);
8438 tw32(SNDDATAI_STATSCTRL,
8439 (SNDDATAI_SCTRL_ENABLE |
8440 SNDDATAI_SCTRL_FASTUPD));
8442 /* Setup host coalescing engine. */
8443 tw32(HOSTCC_MODE, 0);
8444 for (i = 0; i < 2000; i++) {
8445 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
8450 __tg3_set_coalesce(tp, &tp->coal);
8452 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
8453 /* Status/statistics block address. See tg3_timer,
8454 * the tg3_periodic_fetch_stats call there, and
8455 * tg3_get_stats to see how this works for 5705/5750 chips.
8457 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8458 ((u64) tp->stats_mapping >> 32));
8459 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8460 ((u64) tp->stats_mapping & 0xffffffff));
8461 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
8463 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
8465 /* Clear statistics and status block memory areas */
8466 for (i = NIC_SRAM_STATS_BLK;
8467 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
8469 tg3_write_mem(tp, i, 0);
8474 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
8476 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
8477 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
8478 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
8479 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
8481 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8482 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
8483 /* reset to prevent losing 1st rx packet intermittently */
8484 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8488 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
8489 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
8492 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
8493 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
8494 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
8495 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
8496 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
8497 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8498 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR |
MAC_MODE_TXSTAT_CLEAR);
8501 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
8502 * If TG3_FLG2_IS_NIC is zero, we should read the
8503 * register to preserve the GPIO settings for LOMs. The GPIOs,
8504 * whether used as inputs or outputs, are set by boot code after
8507 if (!(tp->tg3_flags2 & TG3_FLG2_IS_NIC)) {
8510 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
8511 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
8512 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
8514 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8515 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
8516 GRC_LCLCTRL_GPIO_OUTPUT3;
8518 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
8519 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
8521 tp->grc_local_ctrl &= ~gpio_mask;
8522 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
8524 /* GPIO1 must be driven high for eeprom write protect */
8525 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)
8526 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
8527 GRC_LCLCTRL_GPIO_OUTPUT1);
8529 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8532 if ((tp->tg3_flags2 & TG3_FLG2_USING_MSIX) &&
8534 val = tr32(MSGINT_MODE);
8535 val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE;
8536 tw32(MSGINT_MODE, val);
8539 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
8540 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
8544 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
8545 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
8546 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
8547 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
8548 WDMAC_MODE_LNGREAD_ENAB);
8550 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8551 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8552 if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
8553 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
8554 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
8556 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8557 !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
8558 val |= WDMAC_MODE_RX_ACCEL;
8562 /* Enable host coalescing bug fix */
8563 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
8564 val |= WDMAC_MODE_STATUS_TAG_FIX;
8566 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
8567 val |= WDMAC_MODE_BURST_ALL_DATA;
8569 tw32_f(WDMAC_MODE, val);
8572 if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
8575 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8577 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
8578 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
8579 pcix_cmd |= PCI_X_CMD_READ_2K;
8580 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
8581 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
8582 pcix_cmd |= PCI_X_CMD_READ_2K;
8584 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8588 tw32_f(RDMAC_MODE, rdmac_mode);
8591 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
8592 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
8593 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
8595 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
8597 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
8599 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
8601 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
8602 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
8603 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
8604 if (tp->tg3_flags3 & TG3_FLG3_LRG_PROD_RING_CAP)
8605 val |= RCVDBDI_MODE_LRG_RING_SZ;
8606 tw32(RCVDBDI_MODE, val);
8607 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
8608 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
8609 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
8610 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
8611 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
8612 val |= SNDBDI_MODE_MULTI_TXQ_EN;
8613 tw32(SNDBDI_MODE, val);
8614 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
8616 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
8617 err = tg3_load_5701_a0_firmware_fix(tp);
8622 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
8623 err = tg3_load_tso_firmware(tp);
8628 tp->tx_mode = TX_MODE_ENABLE;
8630 if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
8631 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
8632 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
8634 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8635 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
8636 tp->tx_mode &= ~val;
8637 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
8640 tw32_f(MAC_TX_MODE, tp->tx_mode);
8643 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS) {
8644 u32 reg = MAC_RSS_INDIR_TBL_0;
8645 u8 *ent = (u8 *)&val;
8647 /* Set up the indirection table: entries are spread round-robin across the rx vectors, packed four to a 32-bit register write. */
8648 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
8649 int idx = i % sizeof(val);
8651 ent[idx] = i % (tp->irq_cnt - 1);
8652 if (idx == sizeof(val) - 1) {
8658 /* Setup the "secret" hash key. */
8659 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
8660 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
8661 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
8662 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
8663 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
8664 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
8665 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
8666 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
8667 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
8668 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
8671 tp->rx_mode = RX_MODE_ENABLE;
8672 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
8673 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
8675 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS)
8676 tp->rx_mode |= RX_MODE_RSS_ENABLE |
8677 RX_MODE_RSS_ITBL_HASH_BITS_7 |
8678 RX_MODE_RSS_IPV6_HASH_EN |
8679 RX_MODE_RSS_TCP_IPV6_HASH_EN |
8680 RX_MODE_RSS_IPV4_HASH_EN |
8681 RX_MODE_RSS_TCP_IPV4_HASH_EN;
8683 tw32_f(MAC_RX_MODE, tp->rx_mode);
8686 tw32(MAC_LED_CTRL, tp->led_ctrl);
8688 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
8689 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8690 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8693 tw32_f(MAC_RX_MODE, tp->rx_mode);
8696 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8697 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
8698 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
8699 /* Set drive transmission level to 1.2V */
8700 /* only if the signal pre-emphasis bit is not set */
8701 val = tr32(MAC_SERDES_CFG);
8704 tw32(MAC_SERDES_CFG, val);
8706 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
8707 tw32(MAC_SERDES_CFG, 0x616000);
8710 /* Prevent chip from dropping frames when flow control is enabled. */
8713 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8717 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
8719 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
8720 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
8721 /* Use hardware link auto-negotiation */
8722 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
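/* Switch the 5714 MII serdes from the external signal detect pin
 * to the internal signal detect.
 */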
8725 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8726 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
8729 tmp = tr32(SERDES_RX_CTRL);
8730 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
8731 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
8732 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
8733 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8736 if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
8737 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
8738 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
8739 tp->link_config.speed = tp->link_config.orig_speed;
8740 tp->link_config.duplex = tp->link_config.orig_duplex;
8741 tp->link_config.autoneg = tp->link_config.orig_autoneg;
8744 err = tg3_setup_phy(tp, 0);
8748 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
8749 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8752 /* Clear CRC stats. */
8753 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
8754 tg3_writephy(tp, MII_TG3_TEST1,
8755 tmp | MII_TG3_TEST1_CRC_EN);
8756 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
8761 __tg3_set_rx_mode(tp->dev);
8763 /* Initialize receive rules. */
8764 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
8765 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
8766 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
8767 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
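/* Work out how many receive rules this chip variant supports,
 * then zero out all of the unused ones.
 */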
8769 if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
8770 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
8774 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
8778 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
8780 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
8782 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
8784 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
8786 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
8788 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
8790 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
8792 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
8794 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
8796 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
8798 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
8800 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
8802 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
8804 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
8812 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
8813 /* Write our heartbeat update interval to APE. */
8814 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
8815 APE_HOST_HEARTBEAT_INT_DISABLE);
8817 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
8822 /* Called at device open time to get the chip ready for
8823 * packet processing. Invoked with tp->lock held.
8825 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
8827 tg3_switch_clocks(tp);
8829 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
8831 return tg3_reset_hw(tp, reset_phy);
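/* Fold a 32-bit hardware counter into a 64-bit (high/low) statistic.
 * The unsigned compare detects low-word wraparound and carries one
 * into the high word.
 */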
8834 #define TG3_STAT_ADD32(PSTAT, REG) \
8835 do { u32 __val = tr32(REG); \
8836 (PSTAT)->low += __val; \
8837 if ((PSTAT)->low < __val) \
8838 (PSTAT)->high += 1; \
static void tg3_periodic_fetch_stats(struct tg3 *tp)
{
	struct tg3_hw_stats *sp = tp->hw_stats;

	if (!netif_carrier_ok(tp->dev))
		return;

	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);

	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);

	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717) {
		TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
	} else {
		u32 val = tr32(HOSTCC_FLOW_ATTN);
		val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
		if (val) {
			tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
			sp->rx_discards.low += val;
			if (sp->rx_discards.low < val)
				sp->rx_discards.high += 1;
		}
		sp->mbuf_lwm_thresh_hit = sp->rx_discards;
	}
	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
}

static void tg3_timer(unsigned long __opaque)
{
	struct tg3 *tp = (struct tg3 *) __opaque;

	if (tp->irq_sync)
		goto restart_timer;

	spin_lock(&tp->lock);

	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
		/* All of this garbage is because when using non-tagged
		 * IRQ status the mailbox/status_block protocol the chip
		 * uses with the cpu is race prone.
		 */
		if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
			tw32(GRC_LOCAL_CTRL,
			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
		} else {
			tw32(HOSTCC_MODE, tp->coalesce_mode |
			     HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
		}

		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
			tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
			spin_unlock(&tp->lock);
			schedule_work(&tp->reset_task);
			return;
		}
	}

	/* This part only runs once per second. */
	if (!--tp->timer_counter) {
		if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
			tg3_periodic_fetch_stats(tp);

		if (tp->setlpicnt && !--tp->setlpicnt) {
			u32 val = tr32(TG3_CPMU_EEE_MODE);
			tw32(TG3_CPMU_EEE_MODE,
			     val | TG3_CPMU_EEEMD_LPI_ENABLE);
		}

		if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
			u32 mac_stat;
			int phy_event;

			mac_stat = tr32(MAC_STATUS);

			phy_event = 0;
			if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
					phy_event = 1;
			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
				phy_event = 1;

			if (phy_event)
				tg3_setup_phy(tp, 0);
		} else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
			u32 mac_stat = tr32(MAC_STATUS);
			int need_setup = 0;

			if (netif_carrier_ok(tp->dev) &&
			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
				need_setup = 1;
			}
			if (!netif_carrier_ok(tp->dev) &&
			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
					 MAC_STATUS_SIGNAL_DET))) {
				need_setup = 1;
			}
			if (need_setup) {
				if (!tp->serdes_counter) {
					tw32_f(MAC_MODE,
					       (tp->mac_mode &
						~MAC_MODE_PORT_MODE_MASK));
					udelay(40);
					tw32_f(MAC_MODE, tp->mac_mode);
					udelay(40);
				}
				tg3_setup_phy(tp, 0);
			}
		} else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
			   (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
			tg3_serdes_parallel_detect(tp);
		}

		tp->timer_counter = tp->timer_multiplier;
	}

	/* Heartbeat is only sent once every 2 seconds.
	 *
	 * The heartbeat is to tell the ASF firmware that the host
	 * driver is still alive.  In the event that the OS crashes,
	 * ASF needs to reset the hardware to free up the FIFO space
	 * that may be filled with rx packets destined for the host.
	 * If the FIFO is full, ASF will no longer function properly.
	 *
	 * Unintended resets have been reported on real time kernels
	 * where the timer doesn't run on time.  Netpoll will also have
	 * the same problem.
	 *
	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
	 * to check the ring condition when the heartbeat is expiring
	 * before doing the reset.  This will prevent most unintended
	 * resets.
	 */
	if (!--tp->asf_counter) {
		if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
		    !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
			tg3_wait_for_event_ack(tp);

			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
				      FWCMD_NICDRV_ALIVE3);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
				      TG3_FW_UPDATE_TIMEOUT_SEC);

			tg3_generate_fw_event(tp);
		}
		tp->asf_counter = tp->asf_multiplier;
	}

	spin_unlock(&tp->lock);

restart_timer:
	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
}

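/* Cadence note: tg3_open() below sets timer_offset to HZ (tagged
 * status) or HZ/10, and timer_counter/timer_multiplier to
 * HZ / timer_offset, so the "once per second" block above really does
 * run about once a second; asf_multiplier is twice that, giving the
 * two-second heartbeat described in the comment.
 */
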
static int tg3_request_irq(struct tg3 *tp, int irq_num)
{
	irq_handler_t fn;
	unsigned long flags;
	char *name;
	struct tg3_napi *tnapi = &tp->napi[irq_num];

	if (tp->irq_cnt == 1)
		name = tp->dev->name;
	else {
		name = &tnapi->irq_lbl[0];
		snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
		name[IFNAMSIZ-1] = 0;
	}

	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI_OR_MSIX) {
		fn = tg3_msi;
		if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
			fn = tg3_msi_1shot;
		flags = 0;
	} else {
		fn = tg3_interrupt;
		if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
			fn = tg3_interrupt_tagged;
		flags = IRQF_SHARED;
	}

	return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
}

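/* Note on handler selection: MSI/MSI-X vectors are per-device, so the
 * MSI handlers run without IRQF_SHARED (the one-shot variant when
 * TG3_FLG2_1SHOT_MSI is set), while legacy INTx must share the line
 * and picks the tagged- or non-tagged-status handler to match how the
 * chip reports status-block updates.
 */
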
static int tg3_test_interrupt(struct tg3 *tp)
{
	struct tg3_napi *tnapi = &tp->napi[0];
	struct net_device *dev = tp->dev;
	int err, i, intr_ok = 0;
	u32 val;

	if (!netif_running(dev))
		return -ENODEV;

	tg3_disable_ints(tp);

	free_irq(tnapi->irq_vec, tnapi);

	/*
	 * Turn off MSI one shot mode.  Otherwise this test has no
	 * observable way to know whether the interrupt was delivered.
	 */
	if ((tp->tg3_flags3 & TG3_FLG3_57765_PLUS) &&
	    (tp->tg3_flags2 & TG3_FLG2_USING_MSI)) {
		val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, val);
	}

	err = request_irq(tnapi->irq_vec, tg3_test_isr,
			  IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, tnapi);
	if (err)
		return err;

	tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
	tg3_enable_ints(tp);

	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       tnapi->coal_now);

	for (i = 0; i < 5; i++) {
		u32 int_mbox, misc_host_ctrl;

		int_mbox = tr32_mailbox(tnapi->int_mbox);
		misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);

		if ((int_mbox != 0) ||
		    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
			intr_ok = 1;
			break;
		}

		msleep(10);
	}

	tg3_disable_ints(tp);

	free_irq(tnapi->irq_vec, tnapi);

	err = tg3_request_irq(tp, 0);

	if (err)
		return err;

	if (intr_ok) {
		/* Reenable MSI one shot mode. */
		if ((tp->tg3_flags3 & TG3_FLG3_57765_PLUS) &&
		    (tp->tg3_flags2 & TG3_FLG2_USING_MSI)) {
			val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
			tw32(MSGINT_MODE, val);
		}
		return 0;
	}

	return -EIO;
}

/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
 * successfully restored
 */
static int tg3_test_msi(struct tg3 *tp)
{
	int err;
	u16 pci_cmd;

	if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
		return 0;

	/* Turn off SERR reporting in case MSI terminates with Master
	 * Abort.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_write_config_word(tp->pdev, PCI_COMMAND,
			      pci_cmd & ~PCI_COMMAND_SERR);

	err = tg3_test_interrupt(tp);

	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	if (!err)
		return 0;

	/* other failures */
	if (err != -EIO)
		return err;

	/* MSI test failed, go back to INTx mode */
	netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
		    "to INTx mode. Please report this failure to the PCI "
		    "maintainer and include system chipset information\n");

	free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

	pci_disable_msi(tp->pdev);

	tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
	tp->napi[0].irq_vec = tp->pdev->irq;

	err = tg3_request_irq(tp, 0);
	if (err)
		return err;

	/* Need to reset the chip because the MSI cycle may have terminated
	 * with Master Abort.
	 */
	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	err = tg3_init_hw(tp, 1);

	tg3_full_unlock(tp);

	if (err)
		free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

	return err;
}

static int tg3_request_firmware(struct tg3 *tp)
{
	const __be32 *fw_data;

	if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
		netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
			   tp->fw_needed);
		return -ENOENT;
	}

	fw_data = (void *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	 * start address and _full_ length including BSS sections
	 * (which must be longer than the actual data, of course).
	 */

	tp->fw_len = be32_to_cpu(fw_data[2]);	/* includes bss */
	if (tp->fw_len < (tp->fw->size - 12)) {
		netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
			   tp->fw_len, tp->fw_needed);
		release_firmware(tp->fw);
		tp->fw = NULL;
		return -EINVAL;
	}

	/* We no longer need firmware; we have it. */
	tp->fw_needed = NULL;
	return 0;
}

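/* Blob layout sketch (assumed from the checks above): the first three
 * big-endian words appear to hold version info, a load address, and
 * the full runtime length including BSS, with the payload starting at
 * byte 12 -- hence the comparison of fw_len against fw->size - 12.
 */
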
static bool tg3_enable_msix(struct tg3 *tp)
{
	int i, rc, cpus = num_online_cpus();
	struct msix_entry msix_ent[tp->irq_max];

	if (cpus == 1)
		/* Just fallback to the simpler MSI mode. */
		return false;

	/*
	 * We want as many rx rings enabled as there are cpus.
	 * The first MSIX vector only deals with link interrupts, etc,
	 * so we add one to the number of vectors we are requesting.
	 */
	tp->irq_cnt = min_t(unsigned, cpus + 1, tp->irq_max);

	for (i = 0; i < tp->irq_max; i++) {
		msix_ent[i].entry  = i;
		msix_ent[i].vector = 0;
	}

	rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
	if (rc < 0) {
		return false;
	} else if (rc != 0) {
		if (pci_enable_msix(tp->pdev, msix_ent, rc))
			return false;
		netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
			      tp->irq_cnt, rc);
		tp->irq_cnt = rc;
	}

	for (i = 0; i < tp->irq_max; i++)
		tp->napi[i].irq_vec = msix_ent[i].vector;

	netif_set_real_num_tx_queues(tp->dev, 1);
	rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1;
	if (netif_set_real_num_rx_queues(tp->dev, rc)) {
		pci_disable_msix(tp->pdev);
		return false;
	}

	if (tp->irq_cnt > 1) {
		tp->tg3_flags3 |= TG3_FLG3_ENABLE_RSS;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
			tp->tg3_flags3 |= TG3_FLG3_ENABLE_TSS;
			netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
		}
	}

	return true;
}

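/* Vector layout when MSI-X sticks: vector 0 handles link and other
 * non-ring events, vectors 1..n serve one rx ring per online CPU (and
 * the tx rings too on 5719/5720 where TSS is enabled), which is why
 * irq_cnt is cpus + 1 and the queue counts above are irq_cnt - 1.
 */
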
static void tg3_ints_init(struct tg3 *tp)
{
	if ((tp->tg3_flags & TG3_FLAG_SUPPORT_MSI_OR_MSIX) &&
	    !(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
		/* All MSI supporting chips should support tagged
		 * status.  Assert that this is the case.
		 */
		netdev_warn(tp->dev,
			    "MSI without TAGGED_STATUS? Not using MSI\n");
		goto defcfg;
	}

	if ((tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX) && tg3_enable_msix(tp))
		tp->tg3_flags2 |= TG3_FLG2_USING_MSIX;
	else if ((tp->tg3_flags & TG3_FLAG_SUPPORT_MSI) &&
		 pci_enable_msi(tp->pdev) == 0)
		tp->tg3_flags2 |= TG3_FLG2_USING_MSI;

	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI_OR_MSIX) {
		u32 msi_mode = tr32(MSGINT_MODE);
		if ((tp->tg3_flags2 & TG3_FLG2_USING_MSIX) &&
		    tp->irq_cnt > 1)
			msi_mode |= MSGINT_MODE_MULTIVEC_EN;
		tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
	}
defcfg:
	if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSIX)) {
		tp->irq_cnt = 1;
		tp->napi[0].irq_vec = tp->pdev->irq;
		netif_set_real_num_tx_queues(tp->dev, 1);
		netif_set_real_num_rx_queues(tp->dev, 1);
	}
}

static void tg3_ints_fini(struct tg3 *tp)
{
	if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX)
		pci_disable_msix(tp->pdev);
	else if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
		pci_disable_msi(tp->pdev);
	tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI_OR_MSIX;
	tp->tg3_flags3 &= ~(TG3_FLG3_ENABLE_RSS | TG3_FLG3_ENABLE_TSS);
}

static int tg3_open(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int i, err;

	if (tp->fw_needed) {
		err = tg3_request_firmware(tp);
		if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
			if (err)
				return err;
		} else if (err) {
			netdev_warn(tp->dev, "TSO capability disabled\n");
			tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
		} else if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
			netdev_notice(tp->dev, "TSO capability restored\n");
			tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
		}
	}

	netif_carrier_off(tp->dev);

	err = tg3_power_up(tp);
	if (err)
		return err;

	tg3_full_lock(tp, 0);

	tg3_disable_ints(tp);
	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;

	tg3_full_unlock(tp);

	/*
	 * Setup interrupts first so we know how
	 * many NAPI resources to allocate
	 */
	tg3_ints_init(tp);

	/* The placement of this call is tied
	 * to the setup and use of Host TX descriptors.
	 */
	err = tg3_alloc_consistent(tp);
	if (err)
		goto err_out1;

	tg3_napi_init(tp);

	tg3_napi_enable(tp);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		err = tg3_request_irq(tp, i);
		if (err) {
			for (i--; i >= 0; i--)
				free_irq(tnapi->irq_vec, tnapi);
			break;
		}
	}

	if (err)
		goto err_out2;

	tg3_full_lock(tp, 0);

	err = tg3_init_hw(tp, 1);
	if (err) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_free_rings(tp);
	} else {
		if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
			tp->timer_offset = HZ;
		else
			tp->timer_offset = HZ / 10;

		BUG_ON(tp->timer_offset > HZ);
		tp->timer_counter = tp->timer_multiplier =
			(HZ / tp->timer_offset);
		tp->asf_counter = tp->asf_multiplier =
			((HZ / tp->timer_offset) * 2);

		init_timer(&tp->timer);
		tp->timer.expires = jiffies + tp->timer_offset;
		tp->timer.data = (unsigned long) tp;
		tp->timer.function = tg3_timer;
	}

	tg3_full_unlock(tp);

	if (err)
		goto err_out3;

	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
		err = tg3_test_msi(tp);

		if (err) {
			tg3_full_lock(tp, 0);
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			tg3_free_rings(tp);
			tg3_full_unlock(tp);

			goto err_out2;
		}

		if (!(tp->tg3_flags3 & TG3_FLG3_57765_PLUS) &&
		    (tp->tg3_flags2 & TG3_FLG2_USING_MSI)) {
			u32 val = tr32(PCIE_TRANSACTION_CFG);

			tw32(PCIE_TRANSACTION_CFG,
			     val | PCIE_TRANS_CFG_1SHOT_MSI);
		}
	}

	tg3_phy_start(tp);

	tg3_full_lock(tp, 0);

	add_timer(&tp->timer);
	tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
	tg3_enable_ints(tp);

	tg3_full_unlock(tp);

	netif_tx_start_all_queues(dev);

	return 0;

err_out3:
	for (i = tp->irq_cnt - 1; i >= 0; i--) {
		struct tg3_napi *tnapi = &tp->napi[i];
		free_irq(tnapi->irq_vec, tnapi);
	}

err_out2:
	tg3_napi_disable(tp);
	tg3_napi_fini(tp);
	tg3_free_consistent(tp);

err_out1:
	tg3_ints_fini(tp);
	return err;
}

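/* The error labels above unwind in reverse order of setup: err_out3
 * frees the per-vector IRQs, err_out2 stops NAPI and releases the
 * DMA-consistent rings, and err_out1 undoes tg3_ints_init().
 */
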
static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *,
						 struct rtnl_link_stats64 *);
static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);

static int tg3_close(struct net_device *dev)
{
	int i;
	struct tg3 *tp = netdev_priv(dev);

	tg3_napi_disable(tp);
	cancel_work_sync(&tp->reset_task);

	netif_tx_stop_all_queues(dev);

	del_timer_sync(&tp->timer);

	tg3_phy_stop(tp);

	tg3_full_lock(tp, 1);

	tg3_disable_ints(tp);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_free_rings(tp);
	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;

	tg3_full_unlock(tp);

	for (i = tp->irq_cnt - 1; i >= 0; i--) {
		struct tg3_napi *tnapi = &tp->napi[i];
		free_irq(tnapi->irq_vec, tnapi);
	}

	tg3_ints_fini(tp);

	tg3_get_stats64(tp->dev, &tp->net_stats_prev);

	memcpy(&tp->estats_prev, tg3_get_estats(tp),
	       sizeof(tp->estats_prev));

	tg3_napi_fini(tp);

	tg3_free_consistent(tp);

	tg3_power_down(tp);

	netif_carrier_off(tp->dev);

	return 0;
}

static inline u64 get_stat64(tg3_stat64_t *val)
{
	return ((u64)val->high << 32) | ((u64)val->low);
}

static u64 calc_crc_errors(struct tg3 *tp)
{
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 val;

		spin_lock_bh(&tp->lock);
		if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
			tg3_writephy(tp, MII_TG3_TEST1,
				     val | MII_TG3_TEST1_CRC_EN);
			tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
		} else
			val = 0;
		spin_unlock_bh(&tp->lock);

		tp->phy_crc_errors += val;

		return tp->phy_crc_errors;
	}

	return get_stat64(&hw_stats->rx_fcs_errors);
}

#define ESTAT_ADD(member) \
	estats->member = old_estats->member + \
			 get_stat64(&hw_stats->member)

static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
{
	struct tg3_ethtool_stats *estats = &tp->estats;
	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!hw_stats)
		return old_estats;

	ESTAT_ADD(rx_octets);
	ESTAT_ADD(rx_fragments);
	ESTAT_ADD(rx_ucast_packets);
	ESTAT_ADD(rx_mcast_packets);
	ESTAT_ADD(rx_bcast_packets);
	ESTAT_ADD(rx_fcs_errors);
	ESTAT_ADD(rx_align_errors);
	ESTAT_ADD(rx_xon_pause_rcvd);
	ESTAT_ADD(rx_xoff_pause_rcvd);
	ESTAT_ADD(rx_mac_ctrl_rcvd);
	ESTAT_ADD(rx_xoff_entered);
	ESTAT_ADD(rx_frame_too_long_errors);
	ESTAT_ADD(rx_jabbers);
	ESTAT_ADD(rx_undersize_packets);
	ESTAT_ADD(rx_in_length_errors);
	ESTAT_ADD(rx_out_length_errors);
	ESTAT_ADD(rx_64_or_less_octet_packets);
	ESTAT_ADD(rx_65_to_127_octet_packets);
	ESTAT_ADD(rx_128_to_255_octet_packets);
	ESTAT_ADD(rx_256_to_511_octet_packets);
	ESTAT_ADD(rx_512_to_1023_octet_packets);
	ESTAT_ADD(rx_1024_to_1522_octet_packets);
	ESTAT_ADD(rx_1523_to_2047_octet_packets);
	ESTAT_ADD(rx_2048_to_4095_octet_packets);
	ESTAT_ADD(rx_4096_to_8191_octet_packets);
	ESTAT_ADD(rx_8192_to_9022_octet_packets);

	ESTAT_ADD(tx_octets);
	ESTAT_ADD(tx_collisions);
	ESTAT_ADD(tx_xon_sent);
	ESTAT_ADD(tx_xoff_sent);
	ESTAT_ADD(tx_flow_control);
	ESTAT_ADD(tx_mac_errors);
	ESTAT_ADD(tx_single_collisions);
	ESTAT_ADD(tx_mult_collisions);
	ESTAT_ADD(tx_deferred);
	ESTAT_ADD(tx_excessive_collisions);
	ESTAT_ADD(tx_late_collisions);
	ESTAT_ADD(tx_collide_2times);
	ESTAT_ADD(tx_collide_3times);
	ESTAT_ADD(tx_collide_4times);
	ESTAT_ADD(tx_collide_5times);
	ESTAT_ADD(tx_collide_6times);
	ESTAT_ADD(tx_collide_7times);
	ESTAT_ADD(tx_collide_8times);
	ESTAT_ADD(tx_collide_9times);
	ESTAT_ADD(tx_collide_10times);
	ESTAT_ADD(tx_collide_11times);
	ESTAT_ADD(tx_collide_12times);
	ESTAT_ADD(tx_collide_13times);
	ESTAT_ADD(tx_collide_14times);
	ESTAT_ADD(tx_collide_15times);
	ESTAT_ADD(tx_ucast_packets);
	ESTAT_ADD(tx_mcast_packets);
	ESTAT_ADD(tx_bcast_packets);
	ESTAT_ADD(tx_carrier_sense_errors);
	ESTAT_ADD(tx_discards);
	ESTAT_ADD(tx_errors);

	ESTAT_ADD(dma_writeq_full);
	ESTAT_ADD(dma_write_prioq_full);
	ESTAT_ADD(rxbds_empty);
	ESTAT_ADD(rx_discards);
	ESTAT_ADD(rx_errors);
	ESTAT_ADD(rx_threshold_hit);

	ESTAT_ADD(dma_readq_full);
	ESTAT_ADD(dma_read_prioq_full);
	ESTAT_ADD(tx_comp_queue_full);

	ESTAT_ADD(ring_set_send_prod_index);
	ESTAT_ADD(ring_status_update);
	ESTAT_ADD(nic_irqs);
	ESTAT_ADD(nic_avoided_irqs);
	ESTAT_ADD(nic_tx_threshold_hit);

	return estats;
}

static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
						 struct rtnl_link_stats64 *stats)
{
	struct tg3 *tp = netdev_priv(dev);
	struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!hw_stats)
		return old_stats;

	stats->rx_packets = old_stats->rx_packets +
		get_stat64(&hw_stats->rx_ucast_packets) +
		get_stat64(&hw_stats->rx_mcast_packets) +
		get_stat64(&hw_stats->rx_bcast_packets);

	stats->tx_packets = old_stats->tx_packets +
		get_stat64(&hw_stats->tx_ucast_packets) +
		get_stat64(&hw_stats->tx_mcast_packets) +
		get_stat64(&hw_stats->tx_bcast_packets);

	stats->rx_bytes = old_stats->rx_bytes +
		get_stat64(&hw_stats->rx_octets);
	stats->tx_bytes = old_stats->tx_bytes +
		get_stat64(&hw_stats->tx_octets);

	stats->rx_errors = old_stats->rx_errors +
		get_stat64(&hw_stats->rx_errors);
	stats->tx_errors = old_stats->tx_errors +
		get_stat64(&hw_stats->tx_errors) +
		get_stat64(&hw_stats->tx_mac_errors) +
		get_stat64(&hw_stats->tx_carrier_sense_errors) +
		get_stat64(&hw_stats->tx_discards);

	stats->multicast = old_stats->multicast +
		get_stat64(&hw_stats->rx_mcast_packets);
	stats->collisions = old_stats->collisions +
		get_stat64(&hw_stats->tx_collisions);

	stats->rx_length_errors = old_stats->rx_length_errors +
		get_stat64(&hw_stats->rx_frame_too_long_errors) +
		get_stat64(&hw_stats->rx_undersize_packets);

	stats->rx_over_errors = old_stats->rx_over_errors +
		get_stat64(&hw_stats->rxbds_empty);
	stats->rx_frame_errors = old_stats->rx_frame_errors +
		get_stat64(&hw_stats->rx_align_errors);
	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
		get_stat64(&hw_stats->tx_discards);
	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
		get_stat64(&hw_stats->tx_carrier_sense_errors);

	stats->rx_crc_errors = old_stats->rx_crc_errors +
		calc_crc_errors(tp);

	stats->rx_missed_errors = old_stats->rx_missed_errors +
		get_stat64(&hw_stats->rx_discards);

	stats->rx_dropped = tp->rx_dropped;

	return stats;
}

static inline u32 calc_crc(unsigned char *buf, int len)
{
	u32 reg;
	u32 tmp;
	int j, k;

	reg = 0xffffffff;

	for (j = 0; j < len; j++) {
		reg ^= buf[j];

		for (k = 0; k < 8; k++) {
			tmp = reg & 0x01;

			reg >>= 1;

			if (tmp)
				reg ^= 0xedb88320;
		}
	}

	return ~reg;
}

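/* This is the standard bit-reflected IEEE CRC-32 (polynomial
 * 0xedb88320) with initial value ~0 and a final inversion; the
 * multicast hash filter below derives its 7-bit index from this CRC.
 */
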
static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
{
	/* accept or reject all multicast frames */
	tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
}

static void __tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 rx_mode;

	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
				  RX_MODE_KEEP_VLAN_TAG);

#if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
	 * flag clear.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#endif

	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= RX_MODE_PROMISC;
	} else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast. */
		tg3_set_multi(tp, 1);
	} else if (netdev_mc_empty(dev)) {
		/* Reject all multicast. */
		tg3_set_multi(tp, 0);
	} else {
		/* Accept one or more multicast(s). */
		struct netdev_hw_addr *ha;
		u32 mc_filter[4] = { 0, };
		u32 regidx;
		u32 bit;
		u32 crc;

		netdev_for_each_mc_addr(ha, dev) {
			crc = calc_crc(ha->addr, ETH_ALEN);
			bit = ~crc & 0x7f;
			regidx = (bit & 0x60) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		tw32(MAC_HASH_REG_0, mc_filter[0]);
		tw32(MAC_HASH_REG_1, mc_filter[1]);
		tw32(MAC_HASH_REG_2, mc_filter[2]);
		tw32(MAC_HASH_REG_3, mc_filter[3]);
	}

	if (rx_mode != tp->rx_mode) {
		tp->rx_mode = rx_mode;
		tw32_f(MAC_RX_MODE, rx_mode);
		udelay(10);
	}
}

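/* Hash mapping sketch: bit = ~crc & 0x7f selects one of 128 filter
 * bits; the top two of those seven bits (bit & 0x60, shifted down)
 * pick one of the four 32-bit MAC_HASH_REG_[0-3] registers and the
 * low five bits select the bit within that register.
 */
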
static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!netif_running(dev))
		return;

	tg3_full_lock(tp, 0);
	__tg3_set_rx_mode(dev);
	tg3_full_unlock(tp);
}

static int tg3_get_regs_len(struct net_device *dev)
{
	return TG3_REG_BLK_SIZE;
}

static void tg3_get_regs(struct net_device *dev,
			 struct ethtool_regs *regs, void *_p)
{
	struct tg3 *tp = netdev_priv(dev);

	regs->version = 0;

	memset(_p, 0, TG3_REG_BLK_SIZE);

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		return;

	tg3_full_lock(tp, 0);

	tg3_dump_legacy_regs(tp, (u32 *)_p);

	tg3_full_unlock(tp);
}

static int tg3_get_eeprom_len(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	return tp->nvram_size;
}

static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u8  *pd;
	u32 i, offset, len, b_offset, b_count;
	__be32 val;

	if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM)
		return -EINVAL;

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		return -EAGAIN;

	offset = eeprom->offset;
	len = eeprom->len;
	eeprom->len = 0;

	eeprom->magic = TG3_EEPROM_MAGIC;

	if (offset & 3) {
		/* adjustments to start on required 4 byte boundary */
		b_offset = offset & 3;
		b_count = 4 - b_offset;
		if (b_count > len) {
			/* i.e. offset=1 len=2 */
			b_count = len;
		}
		ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
		if (ret)
			return ret;
		memcpy(data, ((char *)&val) + b_offset, b_count);
		len -= b_count;
		offset += b_count;
		eeprom->len += b_count;
	}

	/* read bytes up to the last 4 byte boundary */
	pd = &data[eeprom->len];
	for (i = 0; i < (len - (len & 3)); i += 4) {
		ret = tg3_nvram_read_be32(tp, offset + i, &val);
		if (ret) {
			eeprom->len += i;
			return ret;
		}
		memcpy(pd + i, &val, 4);
	}
	eeprom->len += i;

	if (len & 3) {
		/* read last bytes not ending on 4 byte boundary */
		pd = &data[eeprom->len];
		b_count = len & 3;
		b_offset = offset + len - b_count;
		ret = tg3_nvram_read_be32(tp, b_offset, &val);
		if (ret)
			return ret;
		memcpy(pd, &val, b_count);
		eeprom->len += b_count;
	}
	return 0;
}

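/* Worked example of the alignment fix-up above: offset=1 len=2 gives
 * b_offset=1 and b_count=3, which is clamped to len=2; the word at
 * offset 0 is read and bytes 1-2 of it are returned, after which the
 * aligned loop and the trailing fix-up have nothing left to do.
 */
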
static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);

static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u32 offset, len, b_offset, odd_len;
	u8 *buf;
	__be32 start, end;

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		return -EAGAIN;

	if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) ||
	    eeprom->magic != TG3_EEPROM_MAGIC)
		return -EINVAL;

	offset = eeprom->offset;
	len = eeprom->len;

	if ((b_offset = (offset & 3))) {
		/* adjustments to start on required 4 byte boundary */
		ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
		if (ret)
			return ret;
		len += b_offset;
		offset &= ~3;
		if (len < 4)
			len = 4;
	}

	odd_len = 0;
	if (len & 3) {
		/* adjustments to end on required 4 byte boundary */
		odd_len = 1;
		len = (len + 3) & ~3;
		ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
		if (ret)
			return ret;
	}

	buf = data;
	if (b_offset || odd_len) {
		buf = kmalloc(len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		if (b_offset)
			memcpy(buf, &start, 4);
		if (odd_len)
			memcpy(buf+len-4, &end, 4);
		memcpy(buf + b_offset, data, eeprom->len);
	}

	ret = tg3_nvram_write_block(tp, offset, len, buf);

	if (buf != data)
		kfree(buf);

	return ret;
}

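/* Writes use read-modify-write: if the request is misaligned at
 * either end, the bordering NVRAM words are read into 'start'/'end',
 * stitched around the caller's bytes in a scratch buffer, and the
 * whole padded range is written back in one block.
 */
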
static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		return phy_ethtool_gset(phydev, cmd);
	}

	cmd->supported = (SUPPORTED_Autoneg);

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
		cmd->supported |= (SUPPORTED_1000baseT_Half |
				   SUPPORTED_1000baseT_Full);

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
		cmd->supported |= (SUPPORTED_100baseT_Half |
				   SUPPORTED_100baseT_Full |
				   SUPPORTED_10baseT_Half |
				   SUPPORTED_10baseT_Full |
				   SUPPORTED_TP);
		cmd->port = PORT_TP;
	} else {
		cmd->supported |= SUPPORTED_FIBRE;
		cmd->port = PORT_FIBRE;
	}

	cmd->advertising = tp->link_config.advertising;
	if (netif_running(dev)) {
		cmd->speed = tp->link_config.active_speed;
		cmd->duplex = tp->link_config.active_duplex;
	} else {
		cmd->speed = SPEED_INVALID;
		cmd->duplex = DUPLEX_INVALID;
	}
	cmd->phy_address = tp->phy_addr;
	cmd->transceiver = XCVR_INTERNAL;
	cmd->autoneg = tp->link_config.autoneg;

	return 0;
}

static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		return phy_ethtool_sset(phydev, cmd);
	}

	if (cmd->autoneg != AUTONEG_ENABLE &&
	    cmd->autoneg != AUTONEG_DISABLE)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_DISABLE &&
	    cmd->duplex != DUPLEX_FULL &&
	    cmd->duplex != DUPLEX_HALF)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		u32 mask = ADVERTISED_Autoneg |
			   ADVERTISED_Pause |
			   ADVERTISED_Asym_Pause;

		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
			mask |= ADVERTISED_1000baseT_Half |
				ADVERTISED_1000baseT_Full;

		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
			mask |= ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_TP;
		else
			mask |= ADVERTISED_FIBRE;

		if (cmd->advertising & ~mask)
			return -EINVAL;

		mask &= (ADVERTISED_1000baseT_Half |
			 ADVERTISED_1000baseT_Full |
			 ADVERTISED_100baseT_Half |
			 ADVERTISED_100baseT_Full |
			 ADVERTISED_10baseT_Half |
			 ADVERTISED_10baseT_Full);

		cmd->advertising &= mask;
	} else {
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
			if (cmd->speed != SPEED_1000)
				return -EINVAL;

			if (cmd->duplex != DUPLEX_FULL)
				return -EINVAL;
		} else {
			if (cmd->speed != SPEED_100 &&
			    cmd->speed != SPEED_10)
				return -EINVAL;
		}
	}

	tg3_full_lock(tp, 0);

	tp->link_config.autoneg = cmd->autoneg;
	if (cmd->autoneg == AUTONEG_ENABLE) {
		tp->link_config.advertising = (cmd->advertising |
					       ADVERTISED_Autoneg);
		tp->link_config.speed = SPEED_INVALID;
		tp->link_config.duplex = DUPLEX_INVALID;
	} else {
		tp->link_config.advertising = 0;
		tp->link_config.speed = cmd->speed;
		tp->link_config.duplex = cmd->duplex;
	}

	tp->link_config.orig_speed = tp->link_config.speed;
	tp->link_config.orig_duplex = tp->link_config.duplex;
	tp->link_config.orig_autoneg = tp->link_config.autoneg;

	if (netif_running(dev))
		tg3_setup_phy(tp, 1);

	tg3_full_unlock(tp);

	return 0;
}

static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct tg3 *tp = netdev_priv(dev);

	strcpy(info->driver, DRV_MODULE_NAME);
	strcpy(info->version, DRV_MODULE_VERSION);
	strcpy(info->fw_version, tp->fw_ver);
	strcpy(info->bus_info, pci_name(tp->pdev));
}

static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct tg3 *tp = netdev_priv(dev);

	if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
	    device_can_wakeup(&tp->pdev->dev))
		wol->supported = WAKE_MAGIC;
	else
		wol->supported = 0;
	wol->wolopts = 0;
	if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
	    device_can_wakeup(&tp->pdev->dev))
		wol->wolopts = WAKE_MAGIC;
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct tg3 *tp = netdev_priv(dev);
	struct device *dp = &tp->pdev->dev;

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;
	if ((wol->wolopts & WAKE_MAGIC) &&
	    !((tp->tg3_flags & TG3_FLAG_WOL_CAP) && device_can_wakeup(dp)))
		return -EINVAL;

	device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);

	spin_lock_bh(&tp->lock);
	if (device_may_wakeup(dp))
		tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
	else
		tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
	spin_unlock_bh(&tp->lock);

	return 0;
}

static u32 tg3_get_msglevel(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	return tp->msg_enable;
}

static void tg3_set_msglevel(struct net_device *dev, u32 value)
{
	struct tg3 *tp = netdev_priv(dev);
	tp->msg_enable = value;
}

static int tg3_nway_reset(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int r;

	if (!netif_running(dev))
		return -EAGAIN;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		return -EINVAL;

	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
	} else {
		u32 bmcr;

		spin_lock_bh(&tp->lock);
		r = -EINVAL;
		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
		    ((bmcr & BMCR_ANENABLE) ||
		     (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
						   BMCR_ANENABLE);
			r = 0;
		}
		spin_unlock_bh(&tp->lock);
	}

	return r;
}

static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct tg3 *tp = netdev_priv(dev);

	ering->rx_max_pending = tp->rx_std_ring_mask;
	ering->rx_mini_max_pending = 0;
	if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
		ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
	else
		ering->rx_jumbo_max_pending = 0;

	ering->tx_max_pending = TG3_TX_RING_SIZE - 1;

	ering->rx_pending = tp->rx_pending;
	ering->rx_mini_pending = 0;
	if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
		ering->rx_jumbo_pending = tp->rx_jumbo_pending;
	else
		ering->rx_jumbo_pending = 0;

	ering->tx_pending = tp->napi[0].tx_pending;
}

static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct tg3 *tp = netdev_priv(dev);
	int i, irq_sync = 0, err = 0;

	if ((ering->rx_pending > tp->rx_std_ring_mask) ||
	    (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
	    (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS) ||
	    ((tp->tg3_flags2 & TG3_FLG2_TSO_BUG) &&
	     (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
		return -EINVAL;

	if (netif_running(dev)) {
		tg3_phy_stop(tp);
		tg3_netif_stop(tp);
		irq_sync = 1;
	}

	tg3_full_lock(tp, irq_sync);

	tp->rx_pending = ering->rx_pending;

	if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
	    tp->rx_pending > 63)
		tp->rx_pending = 63;
	tp->rx_jumbo_pending = ering->rx_jumbo_pending;

	for (i = 0; i < tp->irq_max; i++)
		tp->napi[i].tx_pending = ering->tx_pending;

	if (netif_running(dev)) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		err = tg3_restart_hw(tp, 1);
		if (!err)
			tg3_netif_start(tp);
	}

	tg3_full_unlock(tp);

	if (irq_sync && !err)
		tg3_phy_start(tp);

	return err;
}

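/* The tx_pending floor above exists because every skb fragment
 * consumes a descriptor: a ring no larger than MAX_SKB_FRAGS could
 * never accept a maximally fragmented packet, and chips with the TSO
 * bug workaround appear to need up to three times that headroom.
 */
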
static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct tg3 *tp = netdev_priv(dev);

	epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;

	if (tp->link_config.active_flowctrl & FLOW_CTRL_RX)
		epause->rx_pause = 1;
	else
		epause->rx_pause = 0;

	if (tp->link_config.active_flowctrl & FLOW_CTRL_TX)
		epause->tx_pause = 1;
	else
		epause->tx_pause = 0;
}

static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;

	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
		u32 newadv;
		struct phy_device *phydev;

		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

		if (!(phydev->supported & SUPPORTED_Pause) ||
		    (!(phydev->supported & SUPPORTED_Asym_Pause) &&
		     (epause->rx_pause != epause->tx_pause)))
			return -EINVAL;

		tp->link_config.flowctrl = 0;
		if (epause->rx_pause) {
			tp->link_config.flowctrl |= FLOW_CTRL_RX;

			if (epause->tx_pause) {
				tp->link_config.flowctrl |= FLOW_CTRL_TX;
				newadv = ADVERTISED_Pause;
			} else
				newadv = ADVERTISED_Pause |
					 ADVERTISED_Asym_Pause;
		} else if (epause->tx_pause) {
			tp->link_config.flowctrl |= FLOW_CTRL_TX;
			newadv = ADVERTISED_Asym_Pause;
		} else
			newadv = 0;

		if (epause->autoneg)
			tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
		else
			tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;

		if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
			u32 oldadv = phydev->advertising &
				     (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
			if (oldadv != newadv) {
				phydev->advertising &=
					~(ADVERTISED_Pause |
					  ADVERTISED_Asym_Pause);
				phydev->advertising |= newadv;
				if (phydev->autoneg) {
					/*
					 * Always renegotiate the link to
					 * inform our link partner of our
					 * flow control settings, even if the
					 * flow control is forced.  Let
					 * tg3_adjust_link() do the final
					 * flow control setup.
					 */
					return phy_start_aneg(phydev);
				}
			}

			if (!epause->autoneg)
				tg3_setup_flow_control(tp, 0, 0);
		} else {
			tp->link_config.orig_advertising &=
					~(ADVERTISED_Pause |
					  ADVERTISED_Asym_Pause);
			tp->link_config.orig_advertising |= newadv;
		}
	} else {
		int irq_sync = 0;

		if (netif_running(dev)) {
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		if (epause->autoneg)
			tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
		else
			tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
		if (epause->rx_pause)
			tp->link_config.flowctrl |= FLOW_CTRL_RX;
		else
			tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
		if (epause->tx_pause)
			tp->link_config.flowctrl |= FLOW_CTRL_TX;
		else
			tp->link_config.flowctrl &= ~FLOW_CTRL_TX;

		if (netif_running(dev)) {
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			err = tg3_restart_hw(tp, 1);
			if (!err)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);
	}

	return err;
}

static int tg3_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_TEST:
		return TG3_NUM_TEST;
	case ETH_SS_STATS:
		return TG3_NUM_STATS;
	default:
		return -EOPNOTSUPP;
	}
}

static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
		break;
	case ETH_SS_TEST:
		memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
		break;
	default:
		WARN_ON(1);	/* we need a WARN() */
		break;
	}
}

static int tg3_set_phys_id(struct net_device *dev,
			   enum ethtool_phys_id_state state)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!netif_running(tp->dev))
		return -EAGAIN;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		return 1;	/* cycle on/off once per second */

	case ETHTOOL_ID_ON:
		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
		     LED_CTRL_1000MBPS_ON |
		     LED_CTRL_100MBPS_ON |
		     LED_CTRL_10MBPS_ON |
		     LED_CTRL_TRAFFIC_OVERRIDE |
		     LED_CTRL_TRAFFIC_BLINK |
		     LED_CTRL_TRAFFIC_LED);
		break;

	case ETHTOOL_ID_OFF:
		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
		     LED_CTRL_TRAFFIC_OVERRIDE);
		break;

	case ETHTOOL_ID_INACTIVE:
		tw32(MAC_LED_CTRL, tp->led_ctrl);
		break;
	}

	return 0;
}

static void tg3_get_ethtool_stats(struct net_device *dev,
				  struct ethtool_stats *estats, u64 *tmp_stats)
{
	struct tg3 *tp = netdev_priv(dev);
	memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
}

static __be32 * tg3_vpd_readblock(struct tg3 *tp)
{
	int i;
	__be32 *buf;
	u32 offset = 0, len = 0;
	u32 magic, val;

	if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) ||
	    tg3_nvram_read(tp, 0, &magic))
		return NULL;

	if (magic == TG3_EEPROM_MAGIC) {
		for (offset = TG3_NVM_DIR_START;
		     offset < TG3_NVM_DIR_END;
		     offset += TG3_NVM_DIRENT_SIZE) {
			if (tg3_nvram_read(tp, offset, &val))
				return NULL;

			if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
			    TG3_NVM_DIRTYPE_EXTVPD)
				break;
		}

		if (offset != TG3_NVM_DIR_END) {
			len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
			if (tg3_nvram_read(tp, offset + 4, &offset))
				return NULL;

			offset = tg3_nvram_logical_addr(tp, offset);
		}
	}

	if (!offset || !len) {
		offset = TG3_NVM_VPD_OFF;
		len = TG3_NVM_VPD_LEN;
	}

	buf = kmalloc(len, GFP_KERNEL);
	if (buf == NULL)
		return NULL;

	if (magic == TG3_EEPROM_MAGIC) {
		for (i = 0; i < len; i += 4) {
			/* The data is in little-endian format in NVRAM.
			 * Use the big-endian read routines to preserve
			 * the byte order as it exists in NVRAM.
			 */
			if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
				goto error;
		}
	} else {
		u8 *ptr;
		ssize_t cnt;
		unsigned int pos = 0;

		ptr = (u8 *)&buf[0];
		for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
			cnt = pci_read_vpd(tp->pdev, pos,
					   len - pos, ptr);
			if (cnt == -ETIMEDOUT || cnt == -EINTR)
				cnt = 0;
			else if (cnt < 0)
				goto error;
		}
		if (pos != len)
			goto error;
	}

	return buf;

error:
	kfree(buf);
	return NULL;
}

#define NVRAM_TEST_SIZE			0x100
#define NVRAM_SELFBOOT_FORMAT1_0_SIZE	0x14
#define NVRAM_SELFBOOT_FORMAT1_2_SIZE	0x18
#define NVRAM_SELFBOOT_FORMAT1_3_SIZE	0x1c
#define NVRAM_SELFBOOT_HW_SIZE		0x20
#define NVRAM_SELFBOOT_DATA_SIZE	0x1c

static int tg3_test_nvram(struct tg3 *tp)
{
	u32 csum, magic;
	__be32 *buf;
	int i, j, k, err = 0, size;

	if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM)
		return 0;

	if (tg3_nvram_read(tp, 0, &magic) != 0)
		return -EIO;

	if (magic == TG3_EEPROM_MAGIC)
		size = NVRAM_TEST_SIZE;
	else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
		if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
		    TG3_EEPROM_SB_FORMAT_1) {
			switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
			case TG3_EEPROM_SB_REVISION_0:
				size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_2:
				size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_3:
				size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
				break;
			default:
				return 0;
			}
		} else
			return 0;
	} else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		size = NVRAM_SELFBOOT_HW_SIZE;
	else
		return -EIO;

	buf = kmalloc(size, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	err = -EIO;
	for (i = 0, j = 0; i < size; i += 4, j++) {
		err = tg3_nvram_read_be32(tp, i, &buf[j]);
		if (err)
			break;
	}
	if (i < size)
		goto out;

	/* Selfboot format */
	magic = be32_to_cpu(buf[0]);
	if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
	    TG3_EEPROM_MAGIC_FW) {
		u8 *buf8 = (u8 *) buf, csum8 = 0;

		if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
		    TG3_EEPROM_SB_REVISION_2) {
			/* For rev 2, the csum doesn't include the MBA. */
			for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
				csum8 += buf8[i];
			for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
				csum8 += buf8[i];
		} else {
			for (i = 0; i < size; i++)
				csum8 += buf8[i];
		}

		if (csum8 == 0) {
			err = 0;
			goto out;
		}

		err = -EIO;
		goto out;
	}

	if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
	    TG3_EEPROM_MAGIC_HW) {
		u8 data[NVRAM_SELFBOOT_DATA_SIZE];
		u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
		u8 *buf8 = (u8 *) buf;

		/* Separate the parity bits and the data bytes. */
		for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
			if ((i == 0) || (i == 8)) {
				int l;
				u8 msk;

				for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
			} else if (i == 16) {
				int l;
				u8 msk;

				for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
			} else if (i == 24) {
				int l;
				u8 msk;

				for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
			} else {
				data[j++] = buf8[i];
			}
		}

		err = -EIO;
		for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
			u8 hw8 = hweight8(data[i]);

			if ((hw8 & 0x1) && parity[i])
				goto out;
			else if (!(hw8 & 0x1) && !parity[i])
				goto out;
		}
		err = 0;
		goto out;
	}

	err = -EIO;

	/* Bootstrap checksum at offset 0x10 */
	csum = calc_crc((unsigned char *) buf, 0x10);
	if (csum != le32_to_cpu(buf[0x10/4]))
		goto out;

	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
	if (csum != le32_to_cpu(buf[0xfc/4]))
		goto out;

	kfree(buf);

	buf = tg3_vpd_readblock(tp);
	if (!buf)
		return -ENOMEM;

	i = pci_vpd_find_tag((u8 *)buf, 0, TG3_NVM_VPD_LEN,
			     PCI_VPD_LRDT_RO_DATA);
	if (i > 0) {
		j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
		if (j < 0)
			goto out;

		if (i + PCI_VPD_LRDT_TAG_SIZE + j > TG3_NVM_VPD_LEN)
			goto out;

		i += PCI_VPD_LRDT_TAG_SIZE;
		j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
					      PCI_VPD_RO_KEYWORD_CHKSUM);
		if (j > 0) {
			u8 csum8 = 0;

			j += PCI_VPD_INFO_FLD_HDR_SIZE;

			for (i = 0; i <= j; i++)
				csum8 += ((u8 *)buf)[i];

			if (csum8)
				goto out;
		}
	}

	err = 0;

out:
	kfree(buf);
	return err;
}

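/* Selfboot HW-format sketch (assumed layout): the 0x20-byte block
 * seems to dedicate bytes 0, 8, 16 and 24 to parity, one bit per data
 * byte (7 + 7 + 6 + 8 = 28 bits for the 28 data bytes); each data
 * byte plus its parity bit must then have odd weight, which is what
 * the hweight8() loop verifies.
 */
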
#define TG3_SERDES_TIMEOUT_SEC	2
#define TG3_COPPER_TIMEOUT_SEC	6

static int tg3_test_link(struct tg3 *tp)
{
	int i, max;

	if (!netif_running(tp->dev))
		return -ENODEV;

	if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
		max = TG3_SERDES_TIMEOUT_SEC;
	else
		max = TG3_COPPER_TIMEOUT_SEC;

	for (i = 0; i < max; i++) {
		if (netif_carrier_ok(tp->dev))
			return 0;

		if (msleep_interruptible(1000))
			break;
	}

	return -EIO;
}

/* Only test the commonly used registers */
static int tg3_test_registers(struct tg3 *tp)
{
	int i, is_5705, is_5750;
	u32 offset, read_mask, write_mask, val, save_val, read_val;
	static struct {
		u16 offset;
		u16 flags;
#define TG3_FL_5705	0x1
#define TG3_FL_NOT_5705	0x2
#define TG3_FL_NOT_5788	0x4
#define TG3_FL_NOT_5750	0x8
		u32 read_mask;
		u32 write_mask;
	} reg_tbl[] = {
		/* MAC Control Registers */
		{ MAC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00ef6f8c },
		{ MAC_MODE, TG3_FL_5705,
			0x00000000, 0x01ef6b8c },
		{ MAC_STATUS, TG3_FL_NOT_5705,
			0x03800107, 0x00000000 },
		{ MAC_STATUS, TG3_FL_5705,
			0x03800100, 0x00000000 },
		{ MAC_ADDR_0_HIGH, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_ADDR_0_LOW, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_RX_MTU_SIZE, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_TX_MODE, 0x0000,
			0x00000000, 0x00000070 },
		{ MAC_TX_LENGTHS, 0x0000,
			0x00000000, 0x00003fff },
		{ MAC_RX_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x000007fc },
		{ MAC_RX_MODE, TG3_FL_5705,
			0x00000000, 0x000007dc },
		{ MAC_HASH_REG_0, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_1, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_2, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_3, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive Data and Receive BD Initiator Control Registers. */
		{ RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
			0x00000000, 0x00000003 },
		{ RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+0, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+4, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+8, 0x0000,
			0x00000000, 0xffff0002 },
		{ RCVDBDI_STD_BD+0xc, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive BD Initiator Control Registers. */
		{ RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVBDI_STD_THRESH, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },

		/* Host Coalescing Control Registers. */
		{ HOSTCC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00000004 },
		{ HOSTCC_MODE, TG3_FL_5705,
			0x00000000, 0x000000f6 },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },
		{ HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },

		/* Buffer Manager Control Registers. */
		{ BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
			0x00000000, 0x007fff80 },
		{ BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
			0x00000000, 0x007fffff },
		{ BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
			0x00000000, 0x0000003f },
		{ BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_MB_HIGH_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },
		{ BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },

		/* Mailbox Registers */
		{ GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
			0x00000000, 0x000007ff },
		{ GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
			0x00000000, 0x000001ff },

		{ 0xffff, 0x0000, 0x00000000, 0x00000000 },
	};

	is_5705 = is_5750 = 0;
	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		is_5705 = 1;
		if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
			is_5750 = 1;
	}

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
			continue;

		if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
			continue;

		if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
		    (reg_tbl[i].flags & TG3_FL_NOT_5788))
			continue;

		if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
			continue;

		offset = (u32) reg_tbl[i].offset;
		read_mask = reg_tbl[i].read_mask;
		write_mask = reg_tbl[i].write_mask;

		/* Save the original register content */
		save_val = tr32(offset);

		/* Determine the read-only value. */
		read_val = save_val & read_mask;

		/* Write zero to the register, then make sure the read-only bits
		 * are not changed and the read/write bits are all zeros.
		 */
		tw32(offset, 0);

		val = tr32(offset);

		/* Test the read-only and read/write bits. */
		if (((val & read_mask) != read_val) || (val & write_mask))
			goto out;

		/* Write ones to all the bits defined by RdMask and WrMask, then
		 * make sure the read-only bits are not changed and the
		 * read/write bits are all ones.
		 */
		tw32(offset, read_mask | write_mask);

		val = tr32(offset);

		/* Test the read-only bits. */
		if ((val & read_mask) != read_val)
			goto out;

		/* Test the read/write bits. */
		if ((val & write_mask) != write_mask)
			goto out;

		tw32(offset, save_val);
	}

	return 0;

out:
	if (netif_msg_hw(tp))
		netdev_err(tp->dev,
			   "Register test failed at offset %x\n", offset);
	tw32(offset, save_val);
	return -EIO;
}

static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
{
	static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
	int i;
	u32 j;

	for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
		for (j = 0; j < len; j += 4) {
			u32 val;

			tg3_write_mem(tp, offset + j, test_pattern[i]);
			tg3_read_mem(tp, offset + j, &val);
			if (val != test_pattern[i])
				return -EIO;
		}
	}
	return 0;
}

static int tg3_test_memory(struct tg3 *tp)
{
	static struct mem_entry {
		u32 offset;
		u32 len;
	} mem_tbl_570x[] = {
		{ 0x00000000, 0x00b50},
		{ 0x00002000, 0x1c000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5705[] = {
		{ 0x00000100, 0x0000c},
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x01000},
		{ 0x00008000, 0x02000},
		{ 0x00010000, 0x0e000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5755[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x00800},
		{ 0x00008000, 0x02000},
		{ 0x00010000, 0x0c000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5906[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00400},
		{ 0x00006000, 0x00400},
		{ 0x00008000, 0x01000},
		{ 0x00010000, 0x01000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5717[] = {
		{ 0x00000200, 0x00008},
		{ 0x00010000, 0x0a000},
		{ 0x00020000, 0x13c00},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_57765[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x09800},
		{ 0x00010000, 0x0a000},
		{ 0xffffffff, 0x00000}
	};
	struct mem_entry *mem_tbl;
	int err = 0;
	int i;

	if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)
		mem_tbl = mem_tbl_5717;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
		mem_tbl = mem_tbl_57765;
	else if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
		mem_tbl = mem_tbl_5755;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		mem_tbl = mem_tbl_5906;
	else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
		mem_tbl = mem_tbl_5705;
	else
		mem_tbl = mem_tbl_570x;

	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
		err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
		if (err)
			break;
	}

	return err;
}

#define TG3_MAC_LOOPBACK	0
#define TG3_PHY_LOOPBACK	1

static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, int loopback_mode)
{
	u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
	u32 desc_idx, coal_now;
	struct sk_buff *skb, *rx_skb;
	u8 *tx_data;
	dma_addr_t map;
	int num_pkts, tx_len, rx_len, i, err;
	struct tg3_rx_buffer_desc *desc;
	struct tg3_napi *tnapi, *rnapi;
	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;

	tnapi = &tp->napi[0];
	rnapi = &tp->napi[0];
	if (tp->irq_cnt > 1) {
		if (tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS)
			rnapi = &tp->napi[1];
		if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
			tnapi = &tp->napi[1];
	}
	coal_now = tnapi->coal_now | rnapi->coal_now;

	if (loopback_mode == TG3_MAC_LOOPBACK) {
		/* HW errata - mac loopback fails in some cases on 5780.
		 * Normal traffic and PHY loopback are not affected by
		 * errata.  Also, the MAC loopback test is deprecated for
		 * all newer ASIC revisions.
		 */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
		    (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT))
			return 0;

		mac_mode = tp->mac_mode &
			   ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
		mac_mode |= MAC_MODE_PORT_INT_LPBACK;
		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
			mac_mode |= MAC_MODE_LINK_POLARITY;
		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		tw32(MAC_MODE, mac_mode);
	} else if (loopback_mode == TG3_PHY_LOOPBACK) {
		u32 val;

		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
			tg3_phy_fet_toggle_apd(tp, false);
			val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
		} else
			val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;

		tg3_phy_toggle_automdix(tp, 0);

		tg3_writephy(tp, MII_BMCR, val);
		udelay(40);

		mac_mode = tp->mac_mode &
			   ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
			tg3_writephy(tp, MII_TG3_FET_PTEST,
				     MII_TG3_FET_PTEST_FRC_TX_LINK |
				     MII_TG3_FET_PTEST_FRC_TX_LOCK);
			/* The write needs to be flushed for the AC131 */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
				tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		} else
			mac_mode |= MAC_MODE_PORT_MODE_GMII;

		/* reset to prevent losing 1st rx packet intermittently */
		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
			tw32_f(MAC_RX_MODE, RX_MODE_RESET);
			udelay(10);
			tw32_f(MAC_RX_MODE, tp->rx_mode);
		}
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
			u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
			if (masked_phy_id == TG3_PHY_ID_BCM5401)
				mac_mode &= ~MAC_MODE_LINK_POLARITY;
			else if (masked_phy_id == TG3_PHY_ID_BCM5411)
				mac_mode |= MAC_MODE_LINK_POLARITY;
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		}
		tw32(MAC_MODE, mac_mode);

		/* Wait for link */
		for (i = 0; i < 100; i++) {
			if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
				break;
			mdelay(1);
		}
	} else {
		return -EINVAL;
	}

	err = -EIO;

	tx_len = pktsz;
	skb = netdev_alloc_skb(tp->dev, tx_len);
	if (!skb)
		return -ENOMEM;

	tx_data = skb_put(skb, tx_len);
	memcpy(tx_data, tp->dev->dev_addr, 6);
	memset(tx_data + 6, 0x0, 8);

	tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);

	for (i = 14; i < tx_len; i++)
		tx_data[i] = (u8) (i & 0xff);

	map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(tp->pdev, map)) {
		dev_kfree_skb(skb);
		return -EIO;
	}

	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       rnapi->coal_now);

	udelay(10);

	rx_start_idx = rnapi->hw_status->idx[0].rx_producer;

	num_pkts = 0;

	tg3_set_txd(tnapi, tnapi->tx_prod, map, tx_len, 0, 1);

	tnapi->tx_prod++;
	num_pkts++;

	tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
	tr32_mailbox(tnapi->prodmbox);

	udelay(10);

	/* 350 usec to allow enough time on some 10/100 Mbps devices. */
	for (i = 0; i < 35; i++) {
		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
		       coal_now);

		udelay(10);

		tx_idx = tnapi->hw_status->idx[0].tx_consumer;
		rx_idx = rnapi->hw_status->idx[0].rx_producer;
		if ((tx_idx == tnapi->tx_prod) &&
		    (rx_idx == (rx_start_idx + num_pkts)))
			break;
	}

	pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);

	if (tx_idx != tnapi->tx_prod)
		goto out;

	if (rx_idx != rx_start_idx + num_pkts)
		goto out;

	desc = &rnapi->rx_rcb[rx_start_idx];
	desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
	opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;

	if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
	    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
		goto out;

	rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
	if (rx_len != tx_len)
		goto out;

	if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
		if (opaque_key != RXD_OPAQUE_RING_STD)
			goto out;

		rx_skb = tpr->rx_std_buffers[desc_idx].skb;
		map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx], mapping);
	} else {
		if (opaque_key != RXD_OPAQUE_RING_JUMBO)
			goto out;

		rx_skb = tpr->rx_jmb_buffers[desc_idx].skb;
		map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx], mapping);
	}

	pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);

	for (i = 14; i < tx_len; i++) {
		if (*(rx_skb->data + i) != (u8) (i & 0xff))
			goto out;
	}
	err = 0;

	/* tg3_free_rings will unmap and free the rx_skb */
out:
	return err;
}

#define TG3_MAC_LOOPBACK_FAILED		1
#define TG3_PHY_LOOPBACK_FAILED		2
#define TG3_LOOPBACK_FAILED		(TG3_MAC_LOOPBACK_FAILED | \
					 TG3_PHY_LOOPBACK_FAILED)

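/* The self-test below packs loopback results into a small bitmask:
 * bit 0 = standard-MTU MAC loopback, bit 1 = PHY loopback, and the
 * same flags shifted left by 2 mark the jumbo-frame variants.
 */
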
static int tg3_test_loopback(struct tg3 *tp)
{
	int err = 0;
	u32 eee_cap, cpmuctrl = 0;

	if (!netif_running(tp->dev))
		return TG3_LOOPBACK_FAILED;

	eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
	tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;

	err = tg3_reset_hw(tp, 1);
	if (err) {
		err = TG3_LOOPBACK_FAILED;
		goto done;
	}

	/* Turn off gphy autopowerdown. */
	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
		tg3_phy_toggle_apd(tp, false);

	if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) {
		int i;
		u32 status;

		tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);

		/* Wait for up to 40 microseconds to acquire lock. */
		for (i = 0; i < 4; i++) {
			status = tr32(TG3_CPMU_MUTEX_GNT);
			if (status == CPMU_MUTEX_GNT_DRIVER)
				break;
			udelay(10);
		}

		if (status != CPMU_MUTEX_GNT_DRIVER) {
			err = TG3_LOOPBACK_FAILED;
			goto done;
		}

		/* Turn off link-based power management. */
		cpmuctrl = tr32(TG3_CPMU_CTRL);
		tw32(TG3_CPMU_CTRL,
		     cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
				  CPMU_CTRL_LINK_AWARE_MODE));
	}

	if (tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_MAC_LOOPBACK))
		err |= TG3_MAC_LOOPBACK_FAILED;

	if ((tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) &&
	    tg3_run_loopback(tp, 9000 + ETH_HLEN, TG3_MAC_LOOPBACK))
		err |= (TG3_MAC_LOOPBACK_FAILED << 2);

	if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) {
		tw32(TG3_CPMU_CTRL, cpmuctrl);

		/* Release the mutex */
		tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
	}

	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
		if (tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_PHY_LOOPBACK))
			err |= TG3_PHY_LOOPBACK_FAILED;
		if ((tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) &&
		    tg3_run_loopback(tp, 9000 + ETH_HLEN, TG3_PHY_LOOPBACK))
			err |= (TG3_PHY_LOOPBACK_FAILED << 2);
	}

	/* Re-enable gphy autopowerdown. */
	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
		tg3_phy_toggle_apd(tp, true);

done:
	tp->phy_flags |= eee_cap;

	return err;
}

static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
	struct tg3 *tp = netdev_priv(dev);

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)

	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);

	if (tg3_test_nvram(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;

	if (tg3_test_link(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;

	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int err, err2 = 0, irq_sync = 0;

		if (netif_running(dev)) {
			tg3_netif_stop(tp);

		tg3_full_lock(tp, irq_sync);

		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
		err = tg3_nvram_lock(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
			tg3_halt_cpu(tp, TX_CPU_BASE);
		tg3_nvram_unlock(tp);

		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)

		if (tg3_test_registers(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;

		if (tg3_test_memory(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;

		if ((data[4] = tg3_test_loopback(tp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		tg3_full_unlock(tp);

		if (tg3_test_interrupt(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;

		tg3_full_lock(tp, 0);

		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		if (netif_running(dev)) {
			tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
			err2 = tg3_restart_hw(tp, 1);
				tg3_netif_start(tp);

		tg3_full_unlock(tp);

		if (irq_sync && !err2)

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		tg3_power_down(tp);
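/*
 * Illustrative userspace sketch, not part of the driver: the self-test
 * above is reached through the ETHTOOL_TEST ioctl.  Setting
 * ETH_TEST_FL_OFFLINE requests the full offline path (register, memory,
 * loopback and interrupt tests) at the cost of briefly stopping traffic.
 * The interface name "eth0" and the test count of 6 (TG3_NUM_TEST) are
 * assumptions; a robust caller would query ETHTOOL_GDRVINFO for
 * testinfo_len first.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ifreq ifr;
	struct ethtool_test *test;
	int i, fd = socket(AF_INET, SOCK_DGRAM, 0);
	int ntests = 6;			/* assumed TG3_NUM_TEST */

	test = calloc(1, sizeof(*test) + ntests * sizeof(__u64));
	test->cmd = ETHTOOL_TEST;
	test->flags = ETH_TEST_FL_OFFLINE;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)test;

	if (fd >= 0 && ioctl(fd, SIOCETHTOOL, &ifr) == 0) {
		printf("%s\n", (test->flags & ETH_TEST_FL_FAILED) ?
		       "FAILED" : "PASSED");
		for (i = 0; i < ntests; i++)	/* per-test results */
			printf("  data[%d] = %llu\n", i,
			       (unsigned long long)test->data[i]);
	}
	free(test);
	return 0;
}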
static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
	struct mii_ioctl_data *data = if_mii(ifr);
	struct tg3 *tp = netdev_priv(dev);

	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		return phy_mii_ioctl(phydev, ifr, cmd);

		data->phy_id = tp->phy_addr;

	case SIOCGMIIREG: {
		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
			break;			/* We have no PHY */

		if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) ||
		    ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
		     !netif_running(dev)))

		spin_lock_bh(&tp->lock);
		err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&tp->lock);

		data->val_out = mii_regval;

		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
			break;			/* We have no PHY */

		if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) ||
		    ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
		     !netif_running(dev)))

		spin_lock_bh(&tp->lock);
		err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&tp->lock);

	return -EOPNOTSUPP;
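/*
 * Illustrative userspace sketch, not part of the driver: the
 * SIOCGMIIPHY/SIOCGMIIREG branches above serve the classic MII ioctls,
 * as used by tools like mii-tool.  The interface name "eth0" is an
 * assumption.
 */
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/mii.h>
#include <linux/sockios.h>

int main(void)
{
	struct ifreq ifr;
	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);

	/* SIOCGMIIPHY fills in the PHY address (tp->phy_addr above) */
	if (fd < 0 || ioctl(fd, SIOCGMIIPHY, &ifr) < 0)
		return 1;

	mii->reg_num = MII_BMSR;	/* basic mode status register */
	if (ioctl(fd, SIOCGMIIREG, &ifr) == 0)
		printf("phy %d BMSR = 0x%04x\n", mii->phy_id, mii->val_out);
	return 0;
}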
static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
	struct tg3 *tp = netdev_priv(dev);

	memcpy(ec, &tp->coal, sizeof(*ec));

static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
	struct tg3 *tp = netdev_priv(dev);
	u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
	u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;

	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
		max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
		max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
		max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
		min_stat_coal_ticks = MIN_STAT_COAL_TICKS;

	if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
	    (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
	    (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
	    (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
	    (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
	    (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
	    (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
	    (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
	    (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
	    (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))

	/* No rx interrupts will be generated if both are zero */
	if ((ec->rx_coalesce_usecs == 0) &&
	    (ec->rx_max_coalesced_frames == 0))

	/* No tx interrupts will be generated if both are zero */
	if ((ec->tx_coalesce_usecs == 0) &&
	    (ec->tx_max_coalesced_frames == 0))

	/* Only copy relevant parameters, ignore all others. */
	tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
	tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
	tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
	tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
	tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
	tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
	tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
	tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
	tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;

	if (netif_running(dev)) {
		tg3_full_lock(tp, 0);
		__tg3_set_coalesce(tp, &tp->coal);
		tg3_full_unlock(tp);
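/*
 * A note on the two "both zero" checks above: a coalescing channel whose
 * usec timer and frame counter are both zero would never raise an
 * interrupt, so such settings are rejected.  A minimal standalone version
 * of that rule (hypothetical helper, for illustration only):
 */
static int tg3_coal_pair_valid(u32 usecs, u32 max_frames)
{
	/* at least one of the two triggers must be armed */
	return usecs != 0 || max_frames != 0;
}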
static const struct ethtool_ops tg3_ethtool_ops = {
	.get_settings		= tg3_get_settings,
	.set_settings		= tg3_set_settings,
	.get_drvinfo		= tg3_get_drvinfo,
	.get_regs_len		= tg3_get_regs_len,
	.get_regs		= tg3_get_regs,
	.get_wol		= tg3_get_wol,
	.set_wol		= tg3_set_wol,
	.get_msglevel		= tg3_get_msglevel,
	.set_msglevel		= tg3_set_msglevel,
	.nway_reset		= tg3_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= tg3_get_eeprom_len,
	.get_eeprom		= tg3_get_eeprom,
	.set_eeprom		= tg3_set_eeprom,
	.get_ringparam		= tg3_get_ringparam,
	.set_ringparam		= tg3_set_ringparam,
	.get_pauseparam		= tg3_get_pauseparam,
	.set_pauseparam		= tg3_set_pauseparam,
	.self_test		= tg3_self_test,
	.get_strings		= tg3_get_strings,
	.set_phys_id		= tg3_set_phys_id,
	.get_ethtool_stats	= tg3_get_ethtool_stats,
	.get_coalesce		= tg3_get_coalesce,
	.set_coalesce		= tg3_set_coalesce,
	.get_sset_count		= tg3_get_sset_count,

static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
	u32 cursize, val, magic;

	tp->nvram_size = EEPROM_CHIP_SIZE;

	if (tg3_nvram_read(tp, 0, &magic) != 0)

	if ((magic != TG3_EEPROM_MAGIC) &&
	    ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
	    ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))

	/*
	 * Size the chip by reading offsets at increasing powers of two.
	 * When we encounter our validation signature, we know the addressing
	 * has wrapped around, and thus have our chip size.
	 */
	while (cursize < tp->nvram_size) {
		if (tg3_nvram_read(tp, cursize, &val) != 0)

	tp->nvram_size = cursize;
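/*
 * Standalone sketch of the sizing loop above (assumed read callback with
 * the same contract as tg3_nvram_read(): returns 0 on success and fills
 * *val).  Probe offsets double from the smallest supported size; when the
 * magic signature stored at offset 0 reads back at offset "cursize", the
 * address lines have wrapped, so "cursize" is the chip size.
 */
static u32 eeprom_probe_size(int (*rd)(u32 off, u32 *val),
			     u32 min_size, u32 max_size, u32 magic)
{
	u32 cursize, val;

	for (cursize = min_size; cursize < max_size; cursize <<= 1) {
		if (rd(cursize, &val) != 0)
			break;		/* read error: give up */
		if (val == magic)
			break;		/* wrapped around to offset 0 */
	}
	return cursize;
}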
static void __devinit tg3_get_nvram_size(struct tg3 *tp)

	if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) ||
	    tg3_nvram_read(tp, 0, &val) != 0)

	/* Selfboot format */
	if (val != TG3_EEPROM_MAGIC) {
		tg3_get_eeprom_size(tp);

	if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
		/* This is confusing. We want to operate on the
		 * 16-bit value at offset 0xf2. The tg3_nvram_read()
		 * call will read from NVRAM and byteswap the data
		 * according to the byteswapping settings for all
		 * other register accesses. This ensures the data we
		 * want will always reside in the lower 16-bits.
		 * However, the data in NVRAM is in LE format, which
		 * means the data from the NVRAM read will always be
		 * opposite the endianness of the CPU. The 16-bit
		 * byteswap then brings the data to CPU endianness.
		 */
		tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;

	tp->nvram_size = TG3_NVRAM_SIZE_512KB;
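/*
 * Worked example for the swab16() conversion above.  If the little-endian
 * 16-bit field at NVRAM offset 0xf2 holds 512 (the size in KiB), the
 * byteswapped register read described in the comment delivers it to the
 * CPU as 0x0002; swapping the two bytes back gives 0x0200 = 512, and
 * 512 * 1024 is the size in bytes.  (Local helper shown for illustration;
 * the kernel provides its own swab16().)
 */
static u16 demo_swab16(u16 x)
{
	return (u16)((x << 8) | (x >> 8));
}
/* demo_swab16(0x0002) == 0x0200, i.e. 512 KiB -> 524288 bytes */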
static void __devinit tg3_get_nvram_info(struct tg3 *tp)

	nvcfg1 = tr32(NVRAM_CFG1);
	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
		tp->tg3_flags2 |= TG3_FLG2_FLASH;
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
		case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
		case FLASH_VENDOR_ATMEL_EEPROM:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		case FLASH_VENDOR_ST:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		case FLASH_VENDOR_SAIFUN:
			tp->nvram_jedecnum = JEDEC_SAIFUN;
			tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
		case FLASH_VENDOR_SST_SMALL:
		case FLASH_VENDOR_SST_LARGE:
			tp->nvram_jedecnum = JEDEC_SST;
			tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;

		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;

static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
	switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
	case FLASH_5752PAGE_SIZE_256:
		tp->nvram_pagesize = 256;
	case FLASH_5752PAGE_SIZE_512:
		tp->nvram_pagesize = 512;
	case FLASH_5752PAGE_SIZE_1K:
		tp->nvram_pagesize = 1024;
	case FLASH_5752PAGE_SIZE_2K:
		tp->nvram_pagesize = 2048;
	case FLASH_5752PAGE_SIZE_4K:
		tp->nvram_pagesize = 4096;
	case FLASH_5752PAGE_SIZE_264:
		tp->nvram_pagesize = 264;
	case FLASH_5752PAGE_SIZE_528:
		tp->nvram_pagesize = 528;

static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27))
		tp->tg3_flags3 |= TG3_FLG3_PROTECTED_NVRAM;

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		tp->tg3_flags2 |= TG3_FLG2_FLASH;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		tp->tg3_flags2 |= TG3_FLG2_FLASH;

	if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
		tg3_nvram_get_pagesize(tp, nvcfg1);
		/* For eeprom, set pagesize to maximum eeprom size */
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);

static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tp->tg3_flags3 |= TG3_FLG3_PROTECTED_NVRAM;

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
	case FLASH_5755VENDOR_ATMEL_FLASH_5:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		tp->tg3_flags2 |= TG3_FLG2_FLASH;
		tp->nvram_pagesize = 264;
		if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
		    nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
			tp->nvram_size = (protect ? 0x3e200 :
					  TG3_NVRAM_SIZE_512KB);
		else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_256KB);
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_128KB);
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		tp->tg3_flags2 |= TG3_FLG2_FLASH;
		tp->nvram_pagesize = 256;
		if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_128KB);
		else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_256KB);
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_128KB :
					  TG3_NVRAM_SIZE_512KB);

static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		tp->tg3_flags2 |= TG3_FLG2_FLASH;
		tp->nvram_pagesize = 264;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		tp->tg3_flags2 |= TG3_FLG2_FLASH;
		tp->nvram_pagesize = 256;

static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tp->tg3_flags3 |= TG3_FLG3_PROTECTED_NVRAM;

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	case FLASH_5761VENDOR_ATMEL_ADB021D:
	case FLASH_5761VENDOR_ATMEL_ADB041D:
	case FLASH_5761VENDOR_ATMEL_ADB081D:
	case FLASH_5761VENDOR_ATMEL_ADB161D:
	case FLASH_5761VENDOR_ATMEL_MDB021D:
	case FLASH_5761VENDOR_ATMEL_MDB041D:
	case FLASH_5761VENDOR_ATMEL_MDB081D:
	case FLASH_5761VENDOR_ATMEL_MDB161D:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		tp->tg3_flags2 |= TG3_FLG2_FLASH;
		tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
		tp->nvram_pagesize = 256;
	case FLASH_5761VENDOR_ST_A_M45PE20:
	case FLASH_5761VENDOR_ST_A_M45PE40:
	case FLASH_5761VENDOR_ST_A_M45PE80:
	case FLASH_5761VENDOR_ST_A_M45PE16:
	case FLASH_5761VENDOR_ST_M_M45PE20:
	case FLASH_5761VENDOR_ST_M_M45PE40:
	case FLASH_5761VENDOR_ST_M_M45PE80:
	case FLASH_5761VENDOR_ST_M_M45PE16:
		tp->nvram_jedecnum = JEDEC_ST;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		tp->tg3_flags2 |= TG3_FLG2_FLASH;
		tp->nvram_pagesize = 256;

	tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);

	case FLASH_5761VENDOR_ATMEL_ADB161D:
	case FLASH_5761VENDOR_ATMEL_MDB161D:
	case FLASH_5761VENDOR_ST_A_M45PE16:
	case FLASH_5761VENDOR_ST_M_M45PE16:
		tp->nvram_size = TG3_NVRAM_SIZE_2MB;
	case FLASH_5761VENDOR_ATMEL_ADB081D:
	case FLASH_5761VENDOR_ATMEL_MDB081D:
	case FLASH_5761VENDOR_ST_A_M45PE80:
	case FLASH_5761VENDOR_ST_M_M45PE80:
		tp->nvram_size = TG3_NVRAM_SIZE_1MB;
	case FLASH_5761VENDOR_ATMEL_ADB041D:
	case FLASH_5761VENDOR_ATMEL_MDB041D:
	case FLASH_5761VENDOR_ST_A_M45PE40:
	case FLASH_5761VENDOR_ST_M_M45PE40:
		tp->nvram_size = TG3_NVRAM_SIZE_512KB;
	case FLASH_5761VENDOR_ATMEL_ADB021D:
	case FLASH_5761VENDOR_ATMEL_MDB021D:
	case FLASH_5761VENDOR_ST_A_M45PE20:
	case FLASH_5761VENDOR_ST_M_M45PE20:
		tp->nvram_size = TG3_NVRAM_SIZE_256KB;

static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
	tp->nvram_jedecnum = JEDEC_ATMEL;
	tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
	tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_57780VENDOR_ATMEL_AT45DB011D:
	case FLASH_57780VENDOR_ATMEL_AT45DB011B:
	case FLASH_57780VENDOR_ATMEL_AT45DB021D:
	case FLASH_57780VENDOR_ATMEL_AT45DB021B:
	case FLASH_57780VENDOR_ATMEL_AT45DB041D:
	case FLASH_57780VENDOR_ATMEL_AT45DB041B:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		tp->tg3_flags2 |= TG3_FLG2_FLASH;

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		case FLASH_57780VENDOR_ATMEL_AT45DB011D:
		case FLASH_57780VENDOR_ATMEL_AT45DB011B:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
		case FLASH_57780VENDOR_ATMEL_AT45DB021D:
		case FLASH_57780VENDOR_ATMEL_AT45DB021B:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
		case FLASH_57780VENDOR_ATMEL_AT45DB041D:
		case FLASH_57780VENDOR_ATMEL_AT45DB041B:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		tp->tg3_flags2 |= TG3_FLG2_FLASH;

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ST_M45PE10:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
		case FLASH_5752VENDOR_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
		case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;

		tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM;

	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;

static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5717VENDOR_ATMEL_EEPROM:
	case FLASH_5717VENDOR_MICRO_EEPROM:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	case FLASH_5717VENDOR_ATMEL_MDB011D:
	case FLASH_5717VENDOR_ATMEL_ADB011B:
	case FLASH_5717VENDOR_ATMEL_ADB011D:
	case FLASH_5717VENDOR_ATMEL_MDB021D:
	case FLASH_5717VENDOR_ATMEL_ADB021B:
	case FLASH_5717VENDOR_ATMEL_ADB021D:
	case FLASH_5717VENDOR_ATMEL_45USPT:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		tp->tg3_flags2 |= TG3_FLG2_FLASH;

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ATMEL_MDB021D:
			/* Detect size with tg3_nvram_get_size() */
		case FLASH_5717VENDOR_ATMEL_ADB021B:
		case FLASH_5717VENDOR_ATMEL_ADB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
	case FLASH_5717VENDOR_ST_M_M25PE10:
	case FLASH_5717VENDOR_ST_A_M25PE10:
	case FLASH_5717VENDOR_ST_M_M45PE10:
	case FLASH_5717VENDOR_ST_A_M45PE10:
	case FLASH_5717VENDOR_ST_M_M25PE20:
	case FLASH_5717VENDOR_ST_A_M25PE20:
	case FLASH_5717VENDOR_ST_M_M45PE20:
	case FLASH_5717VENDOR_ST_A_M45PE20:
	case FLASH_5717VENDOR_ST_25USPT:
	case FLASH_5717VENDOR_ST_45USPT:
		tp->nvram_jedecnum = JEDEC_ST;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		tp->tg3_flags2 |= TG3_FLG2_FLASH;

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ST_M_M25PE20:
		case FLASH_5717VENDOR_ST_M_M45PE20:
			/* Detect size with tg3_nvram_get_size() */
		case FLASH_5717VENDOR_ST_A_M25PE20:
		case FLASH_5717VENDOR_ST_A_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;

		tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM;

	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;

static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
	u32 nvcfg1, nvmpinstrp;

	nvcfg1 = tr32(NVRAM_CFG1);
	nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;

	switch (nvmpinstrp) {
	case FLASH_5720_EEPROM_HD:
	case FLASH_5720_EEPROM_LD:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		if (nvmpinstrp == FLASH_5720_EEPROM_HD)
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
			tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
	case FLASH_5720VENDOR_M_ATMEL_DB011D:
	case FLASH_5720VENDOR_A_ATMEL_DB011B:
	case FLASH_5720VENDOR_A_ATMEL_DB011D:
	case FLASH_5720VENDOR_M_ATMEL_DB021D:
	case FLASH_5720VENDOR_A_ATMEL_DB021B:
	case FLASH_5720VENDOR_A_ATMEL_DB021D:
	case FLASH_5720VENDOR_M_ATMEL_DB041D:
	case FLASH_5720VENDOR_A_ATMEL_DB041B:
	case FLASH_5720VENDOR_A_ATMEL_DB041D:
	case FLASH_5720VENDOR_M_ATMEL_DB081D:
	case FLASH_5720VENDOR_A_ATMEL_DB081D:
	case FLASH_5720VENDOR_ATMEL_45USPT:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		tp->tg3_flags2 |= TG3_FLG2_FLASH;

		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ATMEL_DB021D:
		case FLASH_5720VENDOR_A_ATMEL_DB021B:
		case FLASH_5720VENDOR_A_ATMEL_DB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
		case FLASH_5720VENDOR_M_ATMEL_DB041D:
		case FLASH_5720VENDOR_A_ATMEL_DB041B:
		case FLASH_5720VENDOR_A_ATMEL_DB041D:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
		case FLASH_5720VENDOR_M_ATMEL_DB081D:
		case FLASH_5720VENDOR_A_ATMEL_DB081D:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
	case FLASH_5720VENDOR_M_ST_M25PE10:
	case FLASH_5720VENDOR_M_ST_M45PE10:
	case FLASH_5720VENDOR_A_ST_M25PE10:
	case FLASH_5720VENDOR_A_ST_M45PE10:
	case FLASH_5720VENDOR_M_ST_M25PE20:
	case FLASH_5720VENDOR_M_ST_M45PE20:
	case FLASH_5720VENDOR_A_ST_M25PE20:
	case FLASH_5720VENDOR_A_ST_M45PE20:
	case FLASH_5720VENDOR_M_ST_M25PE40:
	case FLASH_5720VENDOR_M_ST_M45PE40:
	case FLASH_5720VENDOR_A_ST_M25PE40:
	case FLASH_5720VENDOR_A_ST_M45PE40:
	case FLASH_5720VENDOR_M_ST_M25PE80:
	case FLASH_5720VENDOR_M_ST_M45PE80:
	case FLASH_5720VENDOR_A_ST_M25PE80:
	case FLASH_5720VENDOR_A_ST_M45PE80:
	case FLASH_5720VENDOR_ST_25USPT:
	case FLASH_5720VENDOR_ST_45USPT:
		tp->nvram_jedecnum = JEDEC_ST;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		tp->tg3_flags2 |= TG3_FLG2_FLASH;

		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ST_M25PE20:
		case FLASH_5720VENDOR_M_ST_M45PE20:
		case FLASH_5720VENDOR_A_ST_M25PE20:
		case FLASH_5720VENDOR_A_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
		case FLASH_5720VENDOR_M_ST_M25PE40:
		case FLASH_5720VENDOR_M_ST_M45PE40:
		case FLASH_5720VENDOR_A_ST_M25PE40:
		case FLASH_5720VENDOR_A_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
		case FLASH_5720VENDOR_M_ST_M25PE80:
		case FLASH_5720VENDOR_M_ST_M45PE80:
		case FLASH_5720VENDOR_A_ST_M25PE80:
		case FLASH_5720VENDOR_A_ST_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;

		tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM;

	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;

/* Chips other than 5700/5701 use the NVRAM for fetching info. */
static void __devinit tg3_nvram_init(struct tg3 *tp)
	tw32_f(GRC_EEPROM_ADDR,
	       (EEPROM_ADDR_FSM_RESET |
		(EEPROM_DEFAULT_CLOCK_PERIOD <<
		 EEPROM_ADDR_CLKPERD_SHIFT)));

	/* Enable seeprom accesses. */
	tw32_f(GRC_LOCAL_CTRL,
	       tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
		tp->tg3_flags |= TG3_FLAG_NVRAM;

		if (tg3_nvram_lock(tp)) {
			netdev_warn(tp->dev,
				    "Cannot get nvram lock, %s failed\n",

		tg3_enable_nvram_access(tp);

		tp->nvram_size = 0;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
			tg3_get_5752_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
			tg3_get_5755_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
			tg3_get_5787_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			tg3_get_5761_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
			tg3_get_5906_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
			tg3_get_57780_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
			tg3_get_5717_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
			tg3_get_5720_nvram_info(tp);
			tg3_get_nvram_info(tp);

		if (tp->nvram_size == 0)
			tg3_get_nvram_size(tp);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);

		tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);

		tg3_get_eeprom_size(tp);

static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
					      u32 offset, u32 len, u8 *buf)

	for (i = 0; i < len; i += 4) {

		memcpy(&data, buf + i, 4);

		/*
		 * The SEEPROM interface expects the data to always be opposite
		 * the native endian format. We accomplish this by reversing
		 * all the operations that would have been performed on the
		 * data from a call to tg3_nvram_read_be32().
		 */
		tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));

		val = tr32(GRC_EEPROM_ADDR);
		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);

		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
		tw32(GRC_EEPROM_ADDR, val |
		     (0 << EEPROM_ADDR_DEVID_SHIFT) |
		     (addr & EEPROM_ADDR_ADDR_MASK) |
		     EEPROM_ADDR_START |
		     EEPROM_ADDR_WRITE);

		for (j = 0; j < 1000; j++) {
			val = tr32(GRC_EEPROM_ADDR);

			if (val & EEPROM_ADDR_COMPLETE)

		if (!(val & EEPROM_ADDR_COMPLETE)) {
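/*
 * Standalone sketch of the endianness reversal used above (local helper
 * for illustration; the kernel provides swab32() and be32_to_cpu()).  On
 * a little-endian CPU, be32_to_cpu() byte-swaps and swab32() swaps again;
 * on a big-endian CPU, be32_to_cpu() is a no-op and swab32() does the one
 * swap.  Either way the SEEPROM data register is handed the value
 * byte-reversed relative to the CPU's native order, which mirrors the
 * read path in tg3_nvram_read_be32().
 */
static u32 demo_swab32(u32 x)
{
	return (x << 24) | ((x & 0x0000ff00) << 8) |
	       ((x >> 8) & 0x0000ff00) | (x >> 24);
}
/* demo_swab32(0x12345678) == 0x78563412 */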
/* offset and length are dword aligned */
static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,

	u32 pagesize = tp->nvram_pagesize;
	u32 pagemask = pagesize - 1;

	tmp = kmalloc(pagesize, GFP_KERNEL);

		u32 phy_addr, page_off, size;

		phy_addr = offset & ~pagemask;

		for (j = 0; j < pagesize; j += 4) {
			ret = tg3_nvram_read_be32(tp, phy_addr + j,
						  (__be32 *) (tmp + j));

		page_off = offset & pagemask;

		memcpy(tmp + page_off, buf, size);

		offset = offset + (pagesize - page_off);

		tg3_enable_nvram_access(tp);

		/*
		 * Before we can erase the flash page, we need
		 * to issue a special "write enable" command.
		 */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))

		/* Erase the target page */
		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
			    NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))

		/* Issue another write enable to start the write. */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))

		for (j = 0; j < pagesize; j += 4) {

			data = *((__be32 *) (tmp + j));

			tw32(NVRAM_WRDATA, be32_to_cpu(data));

			tw32(NVRAM_ADDR, phy_addr + j);

			nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |

				nvram_cmd |= NVRAM_CMD_FIRST;
			else if (j == (pagesize - 4))
				nvram_cmd |= NVRAM_CMD_LAST;

			if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))

	nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
	tg3_nvram_exec_cmd(tp, nvram_cmd);
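/*
 * Illustrative sketch of the read-modify-write cycle above, with the
 * NVRAM command plumbing abstracted behind hypothetical callbacks.  An
 * unbuffered flash part can only erase and program whole pages, so a
 * write that touches part of a page must first preserve the rest of it.
 */
struct nvram_page_ops {
	int (*read_page)(u32 page_addr, u8 *buf);
	int (*erase_page)(u32 page_addr);
	int (*write_page)(u32 page_addr, const u8 *buf);
};

static int nvram_rmw_write(const struct nvram_page_ops *ops, u32 pagesize,
			   u32 offset, const u8 *buf, u32 len, u8 *tmp)
{
	u32 pagemask = pagesize - 1;

	while (len) {
		u32 page_addr = offset & ~pagemask;	/* page base */
		u32 page_off = offset & pagemask;	/* offset in page */
		u32 size = pagesize - page_off;
		u32 j;

		if (size > len)
			size = len;

		/* 1. read the whole target page into tmp */
		if (ops->read_page(page_addr, tmp))
			return -1;
		/* 2. merge the new bytes at their page offset */
		for (j = 0; j < size; j++)
			tmp[page_off + j] = buf[j];
		/* 3. erase the page, then 4. write the merged copy back */
		if (ops->erase_page(page_addr) ||
		    ops->write_page(page_addr, tmp))
			return -1;

		buf += size;
		offset += size;
		len -= size;
	}
	return 0;
}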
/* offset and length are dword aligned */
static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,

	for (i = 0; i < len; i += 4, offset += 4) {
		u32 page_off, phy_addr, nvram_cmd;

		memcpy(&data, buf + i, 4);
		tw32(NVRAM_WRDATA, be32_to_cpu(data));

		page_off = offset % tp->nvram_pagesize;

		phy_addr = tg3_nvram_phys_addr(tp, offset);

		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;

		if (page_off == 0 || i == 0)
			nvram_cmd |= NVRAM_CMD_FIRST;
		if (page_off == (tp->nvram_pagesize - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		if (i == (len - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
		    !(tp->tg3_flags3 & TG3_FLG3_5755_PLUS) &&
		    (tp->nvram_jedecnum == JEDEC_ST) &&
		    (nvram_cmd & NVRAM_CMD_FIRST)) {

			if ((ret = tg3_nvram_exec_cmd(tp,
				    NVRAM_CMD_WREN | NVRAM_CMD_GO |

		if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
			/* We always do complete word writes to eeprom. */
			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);

		if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
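/*
 * Worked example of the NVRAM_CMD_FIRST/NVRAM_CMD_LAST selection above
 * (hypothetical standalone helper; the real bits come from tg3.h).  With
 * a 256-byte page, writing 12 bytes starting at offset 252 issues three
 * word writes flagged FIRST|LAST (offset 252, the last word of page 0),
 * FIRST (offset 256, the first word of page 1) and LAST (offset 260, the
 * final word of the transfer).
 */
static u32 demo_nvram_cmd_flags(u32 i, u32 len, u32 offset, u32 pagesize)
{
	u32 first = 1, last = 2;	/* stand-ins for the real bits */
	u32 page_off = offset % pagesize;
	u32 cmd = 0;

	if (page_off == 0 || i == 0)
		cmd |= first;
	if (page_off == pagesize - 4 || i == len - 4)
		cmd |= last;
	return cmd;
}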
/* offset and length are dword aligned */
static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)

	if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
		       ~GRC_LCLCTRL_GPIO_OUTPUT1);

	if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
		ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);

		ret = tg3_nvram_lock(tp);

		tg3_enable_nvram_access(tp);
		if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
		    !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM))
			tw32(NVRAM_WRITE1, 0x406);

		grc_mode = tr32(GRC_MODE);
		tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);

		if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
		    !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
			ret = tg3_nvram_write_block_buffered(tp, offset, len,
			ret = tg3_nvram_write_block_unbuffered(tp, offset, len,

		grc_mode = tr32(GRC_MODE);
		tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);

	if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);

struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;

static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
	/* Broadcom boards. */
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },

	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },

	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },

	/* Compaq boards. */
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },

	{ TG3PCI_SUBVENDOR_ID_IBM,
	  TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }

static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)

	for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
		if ((subsys_id_to_phy_id[i].subsys_vendor ==
		     tp->pdev->subsystem_vendor) &&
		    (subsys_id_to_phy_id[i].subsys_devid ==
		     tp->pdev->subsystem_device))
			return &subsys_id_to_phy_id[i];

static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)

	/* On some early chips the SRAM cannot be accessed in D3hot state,
	 * so we need to make sure we're in D0.
	 */
	pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
	pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
	pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);

	/* Make sure register accesses (indirect or otherwise)
	 * will function correctly.
	 */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* The memory arbiter has to be enabled in order for SRAM accesses
	 * to succeed. Normally on powerup the tg3 chip firmware will make
	 * sure it is enabled, but other entities such as system netboot
	 * code might disable it.
	 */
	val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	tp->phy_id = TG3_PHY_ID_INVALID;
	tp->led_ctrl = LED_CTRL_MODE_PHY_1;

	/* Assume an onboard device and WOL capable by default. */
	tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT | TG3_FLAG_WOL_CAP;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
			tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
			tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
		val = tr32(VCPU_CFGSHDW);
		if (val & VCPU_CFGSHDW_ASPM_DBNC)
			tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
		if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
		    (val & VCPU_CFGSHDW_WOL_MAGPKT))
			tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;

	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg, led_cfg;
		u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
		int eeprom_phy_serdes = 0;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		tp->nic_sram_data_cfg = nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
		ver >>= NIC_SRAM_DATA_VER_SHIFT;
		if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
		    (ver > 0) && (ver < 0x100))
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);

		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
			eeprom_phy_serdes = 1;

		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
		if (nic_phy_id != 0) {
			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;

			eeprom_phy_id  = (id1 >> 16) << 10;
			eeprom_phy_id |= (id2 & 0xfc00) << 16;
			eeprom_phy_id |= (id2 & 0x03ff) << 0;

		tp->phy_id = eeprom_phy_id;
		if (eeprom_phy_serdes) {
			if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
				tp->phy_flags |= TG3_PHYFLG_MII_SERDES;

		if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
					  SHASTA_EXT_LED_MODE_MASK);
			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;

		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;

		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;

		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
			tp->led_ctrl = LED_CTRL_MODE_MAC;

			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
			 * read on some older 5700/5701 bootcode.
			 */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    GET_ASIC_REV(tp->pci_chip_rev_id) ==
				tp->led_ctrl = LED_CTRL_MODE_PHY_1;

		case SHASTA_EXT_LED_SHARED:
			tp->led_ctrl = LED_CTRL_MODE_SHARED;
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
			    tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);

		case SHASTA_EXT_LED_MAC:
			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;

		case SHASTA_EXT_LED_COMBO:
			tp->led_ctrl = LED_CTRL_MODE_COMBO;
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);

		if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;

		if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;

		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
			tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
			if ((tp->pdev->subsystem_vendor ==
			     PCI_VENDOR_ID_ARIMA) &&
			    (tp->pdev->subsystem_device == 0x205a ||
			     tp->pdev->subsystem_device == 0x2063))
				tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
			tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
			tp->tg3_flags2 |= TG3_FLG2_IS_NIC;

		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
			if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
				tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;

		if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
		    (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
			tp->tg3_flags3 |= TG3_FLG3_ENABLE_APE;

		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
		    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
			tp->tg3_flags &= ~TG3_FLAG_WOL_CAP;

		if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
		    (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE))
			tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;

		if (cfg2 & (1 << 17))
			tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;

		/* serdes signal pre-emphasis in register 0x590 set by
		 * bootcode if bit 18 is set
		 */
		if (cfg2 & (1 << 18))
			tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;

		if (((tp->tg3_flags3 & TG3_FLG3_57765_PLUS) ||
		     ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
		       GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX))) &&
		    (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
			tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;

		if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
		    !(tp->tg3_flags3 & TG3_FLG3_57765_PLUS)) {

			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
			if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
				tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;

		if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
			tp->tg3_flags3 |= TG3_FLG3_RGMII_INBAND_DISABLE;
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
			tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_RX_EN;
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
			tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_TX_EN;

	if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
		device_set_wakeup_enable(&tp->pdev->dev,
					 tp->tg3_flags & TG3_FLAG_WOL_ENABLE);
		device_set_wakeup_capable(&tp->pdev->dev, false);
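/*
 * Standalone sketch of the PHY ID packing used above (and again in
 * tg3_phy_probe() below): the two 16-bit MII ID words are folded into a
 * single 32-bit driver-internal ID, with PHYSID1 in bits 25:10, the top
 * six bits of PHYSID2 in bits 31:26 and its bottom ten bits in bits 9:0.
 * The helper name is hypothetical.
 */
static u32 demo_pack_phy_id(u32 physid1, u32 physid2)
{
	u32 id;

	id  = (physid1 & 0xffff) << 10;	/* bits 25:10 */
	id |= (physid2 & 0xfc00) << 16;	/* bits 31:26 */
	id |= (physid2 & 0x03ff);	/* bits  9:0  */
	return id;
}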
static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)

	tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
	tw32(OTP_CTRL, cmd);

	/* Wait for up to 1 ms for command to execute. */
	for (i = 0; i < 100; i++) {
		val = tr32(OTP_STATUS);
		if (val & OTP_STATUS_CMD_DONE)

	return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;

/* Read the gphy configuration from the OTP region of the chip. The gphy
 * configuration is a 32-bit value that straddles the alignment boundary.
 * We do two 32-bit reads and then shift and merge the results.
 */
static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
	u32 bhalf_otp, thalf_otp;

	tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))

	thalf_otp = tr32(OTP_READ_DATA);

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))

	bhalf_otp = tr32(OTP_READ_DATA);

	return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
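/*
 * Worked example of the merge above: the 32-bit gphy configuration
 * straddles two OTP words, its upper half in the low 16 bits of the
 * first word and its lower half in the high 16 bits of the second, e.g.
 * thalf = 0xXXXXABCD and bhalf = 0x1234XXXX merge to 0xABCD1234.
 * (Standalone helper for illustration only.)
 */
static u32 demo_otp_merge(u32 thalf, u32 bhalf)
{
	return ((thalf & 0x0000ffff) << 16) | (bhalf >> 16);
}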
12832 static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
12834 u32 adv = ADVERTISED_Autoneg |
12837 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12838 adv |= ADVERTISED_1000baseT_Half |
12839 ADVERTISED_1000baseT_Full;
12841 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
12842 adv |= ADVERTISED_100baseT_Half |
12843 ADVERTISED_100baseT_Full |
12844 ADVERTISED_10baseT_Half |
12845 ADVERTISED_10baseT_Full |
12848 adv |= ADVERTISED_FIBRE;
12850 tp->link_config.advertising = adv;
12851 tp->link_config.speed = SPEED_INVALID;
12852 tp->link_config.duplex = DUPLEX_INVALID;
12853 tp->link_config.autoneg = AUTONEG_ENABLE;
12854 tp->link_config.active_speed = SPEED_INVALID;
12855 tp->link_config.active_duplex = DUPLEX_INVALID;
12856 tp->link_config.orig_speed = SPEED_INVALID;
12857 tp->link_config.orig_duplex = DUPLEX_INVALID;
12858 tp->link_config.orig_autoneg = AUTONEG_INVALID;
12861 static int __devinit tg3_phy_probe(struct tg3 *tp)
12863 u32 hw_phy_id_1, hw_phy_id_2;
12864 u32 hw_phy_id, hw_phy_id_masked;
12867 /* flow control autonegotiation is default behavior */
12868 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
12869 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
12871 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
12872 return tg3_phy_init(tp);
12874 /* Reading the PHY ID register can conflict with ASF
12875 * firmware access to the PHY hardware.
12878 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
12879 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
12880 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
12882 /* Now read the physical PHY_ID from the chip and verify
12883 * that it is sane. If it doesn't look good, we fall back
12884 * to either the hard-coded table based PHY_ID and failing
12885 * that the value found in the eeprom area.
12887 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
12888 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
12890 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
12891 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
12892 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
12894 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
12897 if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
12898 tp->phy_id = hw_phy_id;
12899 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
12900 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
12902 tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
12904 if (tp->phy_id != TG3_PHY_ID_INVALID) {
12905 /* Do nothing, phy ID already set up in
12906 * tg3_get_eeprom_hw_cfg().
12909 struct subsys_tbl_ent *p;
12911 /* No eeprom signature? Try the hardcoded
12912 * subsys device table.
12914 p = tg3_lookup_by_subsys(tp);
12918 tp->phy_id = p->phy_id;
12920 tp->phy_id == TG3_PHY_ID_BCM8002)
12921 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
12925 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
12926 ((tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
12927 tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
12928 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
12929 tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
12930 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
12932 tg3_phy_init_link_config(tp);
12934 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
12935 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) &&
12936 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
12937 u32 bmsr, adv_reg, tg3_ctrl, mask;
12939 tg3_readphy(tp, MII_BMSR, &bmsr);
12940 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
12941 (bmsr & BMSR_LSTATUS))
12942 goto skip_phy_reset;
12944 err = tg3_phy_reset(tp);
12948 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
12949 ADVERTISE_100HALF | ADVERTISE_100FULL |
12950 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
12952 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
12953 tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
12954 MII_TG3_CTRL_ADV_1000_FULL);
12955 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
12956 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
12957 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
12958 MII_TG3_CTRL_ENABLE_AS_MASTER);
12961 mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
12962 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
12963 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
12964 if (!tg3_copper_is_advertising_all(tp, mask)) {
12965 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
12967 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12968 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
12970 tg3_writephy(tp, MII_BMCR,
12971 BMCR_ANENABLE | BMCR_ANRESTART);
12973 tg3_phy_set_wirespeed(tp);
12975 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
12976 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12977 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
12981 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
12982 err = tg3_init_5401phy_dsp(tp);
12986 err = tg3_init_5401phy_dsp(tp);
12992 static void __devinit tg3_read_vpd(struct tg3 *tp)
12995 unsigned int block_end, rosize, len;
12998 vpd_data = (u8 *)tg3_vpd_readblock(tp);
13002 i = pci_vpd_find_tag(vpd_data, 0, TG3_NVM_VPD_LEN,
13003 PCI_VPD_LRDT_RO_DATA);
13005 goto out_not_found;
13007 rosize = pci_vpd_lrdt_size(&vpd_data[i]);
13008 block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
13009 i += PCI_VPD_LRDT_TAG_SIZE;
13011 if (block_end > TG3_NVM_VPD_LEN)
13012 goto out_not_found;
13014 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13015 PCI_VPD_RO_KEYWORD_MFR_ID);
13017 len = pci_vpd_info_field_size(&vpd_data[j]);
13019 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13020 if (j + len > block_end || len != 4 ||
13021 memcmp(&vpd_data[j], "1028", 4))
13024 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13025 PCI_VPD_RO_KEYWORD_VENDOR0);
13029 len = pci_vpd_info_field_size(&vpd_data[j]);
13031 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13032 if (j + len > block_end)
13035 memcpy(tp->fw_ver, &vpd_data[j], len);
13036 strncat(tp->fw_ver, " bc ", TG3_NVM_VPD_LEN - len - 1);
13040 i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13041 PCI_VPD_RO_KEYWORD_PARTNO);
13043 goto out_not_found;
13045 len = pci_vpd_info_field_size(&vpd_data[i]);
13047 i += PCI_VPD_INFO_FLD_HDR_SIZE;
13048 if (len > TG3_BPN_SIZE ||
13049 (len + i) > TG3_NVM_VPD_LEN)
13050 goto out_not_found;
13052 memcpy(tp->board_part_number, &vpd_data[i], len);
13056 if (tp->board_part_number[0])
13060 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
13061 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
13062 strcpy(tp->board_part_number, "BCM5717");
13063 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
13064 strcpy(tp->board_part_number, "BCM5718");
13067 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
13068 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
13069 strcpy(tp->board_part_number, "BCM57780");
13070 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
13071 strcpy(tp->board_part_number, "BCM57760");
13072 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
13073 strcpy(tp->board_part_number, "BCM57790");
13074 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
13075 strcpy(tp->board_part_number, "BCM57788");
13078 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
13079 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
13080 strcpy(tp->board_part_number, "BCM57761");
13081 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
13082 strcpy(tp->board_part_number, "BCM57765");
13083 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
13084 strcpy(tp->board_part_number, "BCM57781");
13085 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
13086 strcpy(tp->board_part_number, "BCM57785");
13087 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
13088 strcpy(tp->board_part_number, "BCM57791");
13089 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13090 strcpy(tp->board_part_number, "BCM57795");
13093 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13094 strcpy(tp->board_part_number, "BCM95906");
13097 strcpy(tp->board_part_number, "none");
13101 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
13105 if (tg3_nvram_read(tp, offset, &val) ||
13106 (val & 0xfc000000) != 0x0c000000 ||
13107 tg3_nvram_read(tp, offset + 4, &val) ||
13114 static void __devinit tg3_read_bc_ver(struct tg3 *tp)
13116 u32 val, offset, start, ver_offset;
13118 bool newver = false;
13120 if (tg3_nvram_read(tp, 0xc, &offset) ||
13121 tg3_nvram_read(tp, 0x4, &start))
13124 offset = tg3_nvram_logical_addr(tp, offset);
13126 if (tg3_nvram_read(tp, offset, &val))
13129 if ((val & 0xfc000000) == 0x0c000000) {
13130 if (tg3_nvram_read(tp, offset + 4, &val))
13137 dst_off = strlen(tp->fw_ver);
13140 if (TG3_VER_SIZE - dst_off < 16 ||
13141 tg3_nvram_read(tp, offset + 8, &ver_offset))
13144 offset = offset + ver_offset - start;
13145 for (i = 0; i < 16; i += 4) {
13147 if (tg3_nvram_read_be32(tp, offset + i, &v))
13150 memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
13155 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
13158 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
13159 TG3_NVM_BCVER_MAJSFT;
13160 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
13161 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
13162 "v%d.%02d", major, minor);
13166 static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
13168 u32 val, major, minor;
13170 /* Use native endian representation */
13171 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
13174 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
13175 TG3_NVM_HWSB_CFG1_MAJSFT;
13176 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
13177 TG3_NVM_HWSB_CFG1_MINSFT;
13179 snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
13182 static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
13184 u32 offset, major, minor, build;
13186 strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
13188 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
13191 switch (val & TG3_EEPROM_SB_REVISION_MASK) {
13192 case TG3_EEPROM_SB_REVISION_0:
13193 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
13195 case TG3_EEPROM_SB_REVISION_2:
13196 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
13198 case TG3_EEPROM_SB_REVISION_3:
13199 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
13201 case TG3_EEPROM_SB_REVISION_4:
13202 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
13204 case TG3_EEPROM_SB_REVISION_5:
13205 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
13207 case TG3_EEPROM_SB_REVISION_6:
13208 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
13214 if (tg3_nvram_read(tp, offset, &val))
13217 build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
13218 TG3_EEPROM_SB_EDH_BLD_SHFT;
13219 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
13220 TG3_EEPROM_SB_EDH_MAJ_SHFT;
13221 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
13223 if (minor > 99 || build > 26)
13226 offset = strlen(tp->fw_ver);
13227 snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
13228 " v%d.%02d", major, minor);
13231 offset = strlen(tp->fw_ver);
13232 if (offset < TG3_VER_SIZE - 1)
13233 tp->fw_ver[offset] = 'a' + build - 1;
13237 static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
13239 u32 val, offset, start;
13242 for (offset = TG3_NVM_DIR_START;
13243 offset < TG3_NVM_DIR_END;
13244 offset += TG3_NVM_DIRENT_SIZE) {
13245 if (tg3_nvram_read(tp, offset, &val))
13248 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
13252 if (offset == TG3_NVM_DIR_END)
13255 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
13256 start = 0x08000000;
13257 else if (tg3_nvram_read(tp, offset - 4, &start))
13260 if (tg3_nvram_read(tp, offset + 4, &offset) ||
13261 !tg3_fw_img_is_valid(tp, offset) ||
13262 tg3_nvram_read(tp, offset + 8, &val))
13265 offset += val - start;
13267 vlen = strlen(tp->fw_ver);
13269 tp->fw_ver[vlen++] = ',';
13270 tp->fw_ver[vlen++] = ' ';
13272 for (i = 0; i < 4; i++) {
13274 if (tg3_nvram_read_be32(tp, offset, &v))
13277 offset += sizeof(v);
13279 if (vlen > TG3_VER_SIZE - sizeof(v)) {
13280 memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
13284 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
13289 static void __devinit tg3_read_dash_ver(struct tg3 *tp)
13295 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) ||
13296 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
13299 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
13300 if (apedata != APE_SEG_SIG_MAGIC)
13303 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
13304 if (!(apedata & APE_FW_STATUS_READY))
13307 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
13309 if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) {
13310 tp->tg3_flags3 |= TG3_FLG3_APE_HAS_NCSI;
13316 vlen = strlen(tp->fw_ver);
13318 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
13320 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
13321 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
13322 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
13323 (apedata & APE_FW_VERSION_BLDMSK));
13326 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
13329 bool vpd_vers = false;
13331 if (tp->fw_ver[0] != 0)
13334 if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) {
13335 strcat(tp->fw_ver, "sb");
13339 if (tg3_nvram_read(tp, 0, &val))
13342 if (val == TG3_EEPROM_MAGIC)
13343 tg3_read_bc_ver(tp);
13344 else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
13345 tg3_read_sb_ver(tp, val);
13346 else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
13347 tg3_read_hwsb_ver(tp);
13351 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
13352 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) || vpd_vers)
13355 tg3_read_mgmtfw_ver(tp);
13358 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
13361 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
13363 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
13365 if (tp->tg3_flags3 & TG3_FLG3_LRG_PROD_RING_CAP)
13366 return TG3_RX_RET_MAX_SIZE_5717;
13367 else if ((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) &&
13368 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
13369 return TG3_RX_RET_MAX_SIZE_5700;
13371 return TG3_RX_RET_MAX_SIZE_5705;
13374 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
13375 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
13376 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
13377 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
13381 static int __devinit tg3_get_invariants(struct tg3 *tp)
13384 u32 pci_state_reg, grc_misc_cfg;
13389 /* Force memory write invalidate off. If we leave it on,
13390 * then on 5700_BX chips we have to enable a workaround.
13391 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
13392 * to match the cacheline size. The Broadcom driver have this
13393 * workaround but turns MWI off all the times so never uses
13394 * it. This seems to suggest that the workaround is insufficient.
13396 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13397 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
13398 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13400 /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
13401 * has the register indirect write enable bit set before
13402 * we try to access any of the MMIO registers. It is also
13403 * critical that the PCI-X hw workaround situation is decided
13404 * before that as well.
13406 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13409 tp->pci_chip_rev_id = (misc_ctrl_reg >>
13410 MISC_HOST_CTRL_CHIPREV_SHIFT);
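/* Newer devices do not report their ASIC revision through
 * MISC_HOST_CTRL; fetch it from the product ID register that
 * matches the device family instead.
 */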
13411 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
13412 u32 prod_id_asic_rev;
13414 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
13415 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
13416 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
13417 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
13418 pci_read_config_dword(tp->pdev,
13419 TG3PCI_GEN2_PRODID_ASICREV,
13420 &prod_id_asic_rev);
13421 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
13422 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
13423 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
13424 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
13425 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
13426 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13427 pci_read_config_dword(tp->pdev,
13428 TG3PCI_GEN15_PRODID_ASICREV,
13429 &prod_id_asic_rev);
13431 pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
13432 &prod_id_asic_rev);
13434 tp->pci_chip_rev_id = prod_id_asic_rev;
13437 /* Wrong chip ID in 5752 A0. This code can be removed later
13438 * as A0 is not in production.
13440 if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
13441 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
13443 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
13444 * we need to disable memory and use config. cycles
13445 * only to access all registers. The 5702/03 chips
13446 * can mistakenly decode the special cycles from the
13447 * ICH chipsets as memory write cycles, causing corruption
13448 * of register and memory space. Only certain ICH bridges
13449 * will drive special cycles with non-zero data during the
13450 * address phase which can fall within the 5703's address
13451 * range. This is not an ICH bug as the PCI spec allows
13452 * non-zero address during special cycles. However, only
13453 * these ICH bridges are known to drive non-zero addresses
13454 * during special cycles.
13456 * Since special cycles do not cross PCI bridges, we only
13457 * enable this workaround if the 5703 is on the secondary
13458 * bus of these ICH bridges.
13460 if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
13461 (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
13462 static struct tg3_dev_id {
13466 } ich_chipsets[] = {
13467 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
13469 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
13471 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
13473 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
13477 struct tg3_dev_id *pci_id = &ich_chipsets[0];
13478 struct pci_dev *bridge = NULL;
13480 while (pci_id->vendor != 0) {
13481 bridge = pci_get_device(pci_id->vendor, pci_id->device,
13487 if (pci_id->rev != PCI_ANY_ID) {
13488 if (bridge->revision > pci_id->rev)
13491 if (bridge->subordinate &&
13492 (bridge->subordinate->number ==
13493 tp->pdev->bus->number)) {
13495 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
13496 pci_dev_put(bridge);
13502 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
13503 static struct tg3_dev_id {
13506 } bridge_chipsets[] = {
13507 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
13508 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
13511 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
13512 struct pci_dev *bridge = NULL;
13514 while (pci_id->vendor != 0) {
13515 bridge = pci_get_device(pci_id->vendor,
13522 if (bridge->subordinate &&
13523 (bridge->subordinate->number <=
13524 tp->pdev->bus->number) &&
13525 (bridge->subordinate->subordinate >=
13526 tp->pdev->bus->number)) {
13527 tp->tg3_flags3 |= TG3_FLG3_5701_DMA_BUG;
13528 pci_dev_put(bridge);
13534 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
13535 * DMA addresses > 40-bit. This bridge may have additional
13536 * 57xx devices behind it, in some 4-port NIC designs for example.
13537 * Any tg3 device found behind the bridge will also need the 40-bit DMA workaround.
13540 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
13541 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
13542 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
13543 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
13544 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
13546 struct pci_dev *bridge = NULL;
13549 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
13550 PCI_DEVICE_ID_SERVERWORKS_EPB,
13552 if (bridge && bridge->subordinate &&
13553 (bridge->subordinate->number <=
13554 tp->pdev->bus->number) &&
13555 (bridge->subordinate->subordinate >=
13556 tp->pdev->bus->number)) {
13557 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
13558 pci_dev_put(bridge);
13564 /* Initialize misc host control in PCI block. */
13565 tp->misc_host_ctrl |= (misc_ctrl_reg &
13566 MISC_HOST_CTRL_CHIPREV);
13567 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13568 tp->misc_host_ctrl);
13570 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
13571 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
13572 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13573 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13574 tp->pdev_peer = tg3_find_peer(tp);
13576 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13577 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13578 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13579 tp->tg3_flags3 |= TG3_FLG3_5717_PLUS;
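/* The chip-class flags below nest: 5717_PLUS implies 57765_PLUS,
 * which implies 5755_PLUS, which implies 5750_PLUS, which in turn
 * implies 5705_PLUS.
 */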
13581 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
13582 (tp->tg3_flags3 & TG3_FLG3_5717_PLUS))
13583 tp->tg3_flags3 |= TG3_FLG3_57765_PLUS;
13585 /* Intentionally exclude ASIC_REV_5906 */
13586 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13587 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13588 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13589 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13590 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13591 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13592 (tp->tg3_flags3 & TG3_FLG3_57765_PLUS))
13593 tp->tg3_flags3 |= TG3_FLG3_5755_PLUS;
13595 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
13596 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
13597 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
13598 (tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
13599 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
13600 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
13602 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
13603 (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
13604 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
13606 /* 5700 B0 chips do not support checksumming correctly due
13607 * to hardware bugs.
13609 if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
13610 u32 features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
13612 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
13613 features |= NETIF_F_IPV6_CSUM;
13614 tp->dev->features |= features;
13615 tp->dev->hw_features |= features;
13616 tp->dev->vlan_features |= features;
13619 /* Determine TSO capabilities */
13620 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
13621 ; /* Do nothing. HW bug. */
13622 else if (tp->tg3_flags3 & TG3_FLG3_57765_PLUS)
13623 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_3;
13624 else if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
13625 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13626 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
13627 else if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
13628 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG;
13629 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
13630 tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
13631 tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG;
13632 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13633 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13634 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
13635 tp->tg3_flags2 |= TG3_FLG2_TSO_BUG;
13636 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
13637 tp->fw_needed = FIRMWARE_TG3TSO5;
13639 tp->fw_needed = FIRMWARE_TG3TSO;
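/* Work out which interrupt modes are usable: MSI from the 5750
 * onwards except on a few early revisions, one-shot MSI on
 * 5755-class and 5906 parts, and MSI-X only on 57765-class and
 * newer devices.
 */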
13644 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
13645 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI;
13646 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
13647 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
13648 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
13649 tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
13650 tp->pdev_peer == tp->pdev))
13651 tp->tg3_flags &= ~TG3_FLAG_SUPPORT_MSI;
13653 if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
13654 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13655 tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
13658 if (tp->tg3_flags3 & TG3_FLG3_57765_PLUS) {
13659 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSIX;
13660 tp->irq_max = TG3_IRQ_MAX_VECS;
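/* Flag DMA engine errata: the SHORT_DMA_BUG chips apparently
 * mishandle very small DMA segments, while pre-5755 parts cannot
 * DMA across a 4GB boundary or reliably above 40 bits.
 */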
13664 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13665 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13666 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13667 tp->tg3_flags3 |= TG3_FLG3_SHORT_DMA_BUG;
13668 else if (!(tp->tg3_flags3 & TG3_FLG3_5755_PLUS)) {
13669 tp->tg3_flags3 |= TG3_FLG3_4G_DMA_BNDRY_BUG;
13670 tp->tg3_flags3 |= TG3_FLG3_40BIT_DMA_LIMIT_BUG;
13673 if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)
13674 tp->tg3_flags3 |= TG3_FLG3_LRG_PROD_RING_CAP;
13676 if ((tp->tg3_flags3 & TG3_FLG3_57765_PLUS) &&
13677 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719)
13678 tp->tg3_flags3 |= TG3_FLG3_USE_JUMBO_BDFLAG;
13680 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
13681 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
13682 (tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG))
13683 tp->tg3_flags |= TG3_FLAG_JUMBO_CAPABLE;
13685 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
13688 tp->pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
13689 if (tp->pcie_cap != 0) {
13692 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
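/* Cap the PCIe maximum read request size: 5719 and 5720 are
 * limited to 2048 bytes here, everything else uses 4096.
 */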
13694 tp->pcie_readrq = 4096;
13695 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13696 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13697 tp->pcie_readrq = 2048;
13699 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
13701 pci_read_config_word(tp->pdev,
13702 tp->pcie_cap + PCI_EXP_LNKCTL,
13704 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
13705 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13706 tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_2;
13707 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13708 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13709 tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
13710 tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
13711 tp->tg3_flags3 |= TG3_FLG3_CLKREQ_BUG;
13712 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
13713 tp->tg3_flags3 |= TG3_FLG3_L1PLLPD_EN;
13715 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
13716 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
13717 } else if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
13718 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
13719 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
13720 if (!tp->pcix_cap) {
13721 dev_err(&tp->pdev->dev,
13722 "Cannot find PCI-X capability, aborting\n");
13726 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
13727 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
13730 /* If we have an AMD 762 or VIA K8T800 chipset, write
13731 * reordering to the mailbox registers done by the host
13732 * controller can cause major troubles. We read back from
13733 * every mailbox register write to force the writes to be
13734 * posted to the chip in order.
13736 if (pci_dev_present(tg3_write_reorder_chipsets) &&
13737 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
13738 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
13740 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
13741 &tp->pci_cacheline_sz);
13742 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
13743 &tp->pci_lat_timer);
13744 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
13745 tp->pci_lat_timer < 64) {
13746 tp->pci_lat_timer = 64;
13747 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
13748 tp->pci_lat_timer);
13751 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
13752 /* 5700 BX chips need to have their TX producer index
13753 * mailboxes written twice to workaround a bug.
13755 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
13757 /* If we are in PCI-X mode, enable register write workaround.
13759 * The workaround is to use indirect register accesses
13760 * for all chip writes not to mailbox registers.
13762 if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
13765 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
13767 /* The chip can have its power management PCI config
13768 * space registers clobbered due to this bug.
13769 * So explicitly force the chip into D0 here.
13771 pci_read_config_dword(tp->pdev,
13772 tp->pm_cap + PCI_PM_CTRL,
13774 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
13775 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
13776 pci_write_config_dword(tp->pdev,
13777 tp->pm_cap + PCI_PM_CTRL,
13780 /* Also, force SERR#/PERR# in PCI command. */
13781 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13782 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
13783 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13787 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
13788 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
13789 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
13790 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
13792 /* Chip-specific fixup from Broadcom driver */
13793 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
13794 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
13795 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
13796 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
13799 /* Default fast path register access methods */
13800 tp->read32 = tg3_read32;
13801 tp->write32 = tg3_write32;
13802 tp->read32_mbox = tg3_read32;
13803 tp->write32_mbox = tg3_write32;
13804 tp->write32_tx_mbox = tg3_write32;
13805 tp->write32_rx_mbox = tg3_write32;
13807 /* Various workaround register access methods */
13808 if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
13809 tp->write32 = tg3_write_indirect_reg32;
13810 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
13811 ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
13812 tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
13814 * Back-to-back register writes can cause problems on these
13815 * chips; the workaround is to read back all reg writes
13816 * except those to mailbox regs.
13818 * See tg3_write_indirect_reg32().
13820 tp->write32 = tg3_write_flush_reg32;
13823 if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
13824 (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
13825 tp->write32_tx_mbox = tg3_write32_tx_mbox;
13826 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
13827 tp->write32_rx_mbox = tg3_write_flush_reg32;
13830 if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
13831 tp->read32 = tg3_read_indirect_reg32;
13832 tp->write32 = tg3_write_indirect_reg32;
13833 tp->read32_mbox = tg3_read_indirect_mbox;
13834 tp->write32_mbox = tg3_write_indirect_mbox;
13835 tp->write32_tx_mbox = tg3_write_indirect_mbox;
13836 tp->write32_rx_mbox = tg3_write_indirect_mbox;
13841 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13842 pci_cmd &= ~PCI_COMMAND_MEMORY;
13843 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13845 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13846 tp->read32_mbox = tg3_read32_mbox_5906;
13847 tp->write32_mbox = tg3_write32_mbox_5906;
13848 tp->write32_tx_mbox = tg3_write32_mbox_5906;
13849 tp->write32_rx_mbox = tg3_write32_mbox_5906;
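/* If register accesses already go through config space, or the
 * chip is a 5700/5701 running in PCI-X mode, NIC SRAM must be
 * reached through the config-space memory window as well.
 */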
13852 if (tp->write32 == tg3_write_indirect_reg32 ||
13853 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
13854 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13855 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
13856 tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
13858 /* Get eeprom hw config before calling tg3_set_power_state().
13859 * In particular, the TG3_FLG2_IS_NIC flag must be
13860 * determined before calling tg3_set_power_state() so that
13861 * we know whether or not to switch out of Vaux power.
13862 * When the flag is set, it means that GPIO1 is used for eeprom
13863 * write protect and also implies that it is a LOM where GPIOs
13864 * are not used to switch power.
13866 tg3_get_eeprom_hw_cfg(tp);
13868 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
13869 /* Allow reads and writes to the
13870 * APE register and memory space.
13872 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
13873 PCISTATE_ALLOW_APE_SHMEM_WR |
13874 PCISTATE_ALLOW_APE_PSPACE_WR;
13875 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
13879 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13880 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13881 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13882 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13883 (tp->tg3_flags3 & TG3_FLG3_57765_PLUS))
13884 tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT;
13886 /* Set up tp->grc_local_ctrl before calling tg3_power_up().
13887 * GPIO1 driven high will bring 5700's external PHY out of reset.
13888 * It is also used as eeprom write protect on LOMs.
13890 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
13891 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
13892 (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
13893 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
13894 GRC_LCLCTRL_GPIO_OUTPUT1);
13895 /* Unused GPIO3 must be driven as output on 5752 because there
13896 * are no pull-up resistors on unused GPIO pins.
13898 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
13899 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
13901 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13902 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13903 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
13904 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
13906 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
13907 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
13908 /* Turn off the debug UART. */
13909 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
13910 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
13911 /* Keep VMain power. */
13912 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
13913 GRC_LCLCTRL_GPIO_OUTPUT0;
13916 /* Force the chip into D0. */
13917 err = tg3_power_up(tp);
13919 dev_err(&tp->pdev->dev, "Transition to D0 failed\n");
13923 /* Derive initial jumbo mode from MTU assigned in
13924 * ether_setup() via the alloc_etherdev() call
13926 if (tp->dev->mtu > ETH_DATA_LEN &&
13927 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
13928 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
13930 /* Determine WakeOnLan speed to use. */
13931 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13932 tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
13933 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
13934 tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
13935 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
13937 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
13940 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13941 tp->phy_flags |= TG3_PHYFLG_IS_FET;
13943 /* A few boards don't want the Ethernet@WireSpeed PHY feature */
13944 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
13945 ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
13946 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
13947 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
13948 (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
13949 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
13950 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
13952 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
13953 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
13954 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
13955 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
13956 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
13958 if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
13959 !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
13960 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
13961 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
13962 !(tp->tg3_flags3 & TG3_FLG3_57765_PLUS)) {
13963 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13964 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13965 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13966 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
13967 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
13968 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
13969 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
13970 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
13971 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
13973 tp->phy_flags |= TG3_PHYFLG_BER_BUG;
13976 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
13977 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
13978 tp->phy_otp = tg3_read_otp_phycfg(tp);
13979 if (tp->phy_otp == 0)
13980 tp->phy_otp = TG3_OTP_DEFAULT;
13983 if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)
13984 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
13986 tp->mi_mode = MAC_MI_MODE_BASE;
13988 tp->coalesce_mode = 0;
13989 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
13990 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
13991 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
13993 /* Set these bits to enable statistics workaround. */
13994 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13995 tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
13996 tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
13997 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
13998 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
14001 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14002 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
14003 tp->tg3_flags3 |= TG3_FLG3_USE_PHYLIB;
14005 err = tg3_mdio_init(tp);
14009 /* Initialize data/descriptor byte/word swapping. */
14010 val = tr32(GRC_MODE);
14011 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14012 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
14013 GRC_MODE_WORD_SWAP_B2HRX_DATA |
14014 GRC_MODE_B2HRX_ENABLE |
14015 GRC_MODE_HTX2B_ENABLE |
14016 GRC_MODE_HOST_STACKUP);
14018 val &= GRC_MODE_HOST_STACKUP;
14020 tw32(GRC_MODE, val | tp->grc_mode);
14022 tg3_switch_clocks(tp);
14024 /* Clear this out for sanity. */
14025 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
14027 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14029 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
14030 (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
14031 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
14033 if (chiprevid == CHIPREV_ID_5701_A0 ||
14034 chiprevid == CHIPREV_ID_5701_B0 ||
14035 chiprevid == CHIPREV_ID_5701_B2 ||
14036 chiprevid == CHIPREV_ID_5701_B5) {
14037 void __iomem *sram_base;
14039 /* Write some dummy words into the SRAM status block
14040 * area and see if it reads back correctly. If the return
14041 * value is bad, force-enable the PCIX workaround.
14043 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
14045 writel(0x00000000, sram_base);
14046 writel(0x00000000, sram_base + 4);
14047 writel(0xffffffff, sram_base + 4);
14048 if (readl(sram_base) != 0x00000000)
14049 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
14054 tg3_nvram_init(tp);
14056 grc_misc_cfg = tr32(GRC_MISC_CFG);
14057 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
14059 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14060 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
14061 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
14062 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
14064 if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
14065 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
14066 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
14067 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
14068 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
14069 HOSTCC_MODE_CLRTICK_TXBD);
14071 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
14072 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14073 tp->misc_host_ctrl);
14076 /* Preserve the APE MAC_MODE bits */
14077 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
14078 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
14080 tp->mac_mode = TG3_DEF_MAC_MODE;
14082 /* these are limited to 10/100 only */
14083 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14084 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
14085 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14086 tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14087 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
14088 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
14089 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
14090 (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14091 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
14092 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
14093 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
14094 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
14095 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
14096 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
14097 (tp->phy_flags & TG3_PHYFLG_IS_FET))
14098 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
14100 err = tg3_phy_probe(tp);
14102 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
14103 /* ... but do not return immediately ... */
14108 tg3_read_fw_ver(tp);
14110 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
14111 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14113 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14114 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14116 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14119 /* 5700 {AX,BX} chips have a broken status block link
14120 * change bit implementation, so we must use the
14121 * status register in those cases.
14123 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14124 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
14126 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
14128 /* The led_ctrl is set during tg3_phy_probe; here we might
14129 * have to force the link status polling mechanism based
14130 * upon subsystem IDs.
14132 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
14133 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14134 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
14135 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14136 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
14139 /* For all SERDES we poll the MAC status register. */
14140 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14141 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
14143 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
14145 tp->rx_offset = NET_IP_ALIGN;
14146 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
14147 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14148 (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
14150 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
14151 tp->rx_copy_thresh = ~(u16)0;
14155 tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
14156 tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
14157 tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
14159 tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
14161 /* Increment the rx prod index on the rx std ring by at most
14162 * 8 for these chips to work around hw errata.
14164 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14165 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14166 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
14167 tp->rx_std_max_post = 8;
14169 if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND)
14170 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
14171 PCIE_PWR_MGMT_L1_THRESH_MSK;
14176 #ifdef CONFIG_SPARC
14177 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
14179 struct net_device *dev = tp->dev;
14180 struct pci_dev *pdev = tp->pdev;
14181 struct device_node *dp = pci_device_to_OF_node(pdev);
14182 const unsigned char *addr;
14185 addr = of_get_property(dp, "local-mac-address", &len);
14186 if (addr && len == 6) {
14187 memcpy(dev->dev_addr, addr, 6);
14188 memcpy(dev->perm_addr, dev->dev_addr, 6);
14194 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
14196 struct net_device *dev = tp->dev;
14198 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
14199 memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
14204 static int __devinit tg3_get_device_address(struct tg3 *tp)
14206 struct net_device *dev = tp->dev;
14207 u32 hi, lo, mac_offset;
14210 #ifdef CONFIG_SPARC
14211 if (!tg3_get_macaddr_sparc(tp))
14216 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
14217 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
14218 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
14220 if (tg3_nvram_lock(tp))
14221 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
14223 tg3_nvram_unlock(tp);
14224 } else if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) {
14225 if (PCI_FUNC(tp->pdev->devfn) & 1)
14227 if (PCI_FUNC(tp->pdev->devfn) > 1)
14228 mac_offset += 0x18c;
14229 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14232 /* First try to get it from MAC address mailbox. */
14233 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
14234 if ((hi >> 16) == 0x484b) {
14235 dev->dev_addr[0] = (hi >> 8) & 0xff;
14236 dev->dev_addr[1] = (hi >> 0) & 0xff;
14238 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
14239 dev->dev_addr[2] = (lo >> 24) & 0xff;
14240 dev->dev_addr[3] = (lo >> 16) & 0xff;
14241 dev->dev_addr[4] = (lo >> 8) & 0xff;
14242 dev->dev_addr[5] = (lo >> 0) & 0xff;
14244 /* Some old bootcode may report a 0 MAC address in SRAM */
14245 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
14248 /* Next, try NVRAM. */
14249 if (!(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) &&
14250 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
14251 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
14252 memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
14253 memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
14255 /* Finally just fetch it out of the MAC control regs. */
14257 hi = tr32(MAC_ADDR_0_HIGH);
14258 lo = tr32(MAC_ADDR_0_LOW);
14260 dev->dev_addr[5] = lo & 0xff;
14261 dev->dev_addr[4] = (lo >> 8) & 0xff;
14262 dev->dev_addr[3] = (lo >> 16) & 0xff;
14263 dev->dev_addr[2] = (lo >> 24) & 0xff;
14264 dev->dev_addr[1] = hi & 0xff;
14265 dev->dev_addr[0] = (hi >> 8) & 0xff;
14269 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
14270 #ifdef CONFIG_SPARC
14271 if (!tg3_get_default_macaddr_sparc(tp))
14276 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
14280 #define BOUNDARY_SINGLE_CACHELINE 1
14281 #define BOUNDARY_MULTI_CACHELINE 2
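/* Choose DMA read/write boundary bits for TG3PCI_DMA_RW_CTRL from
 * the host cache line size; on RISC platforms the goal is forced
 * to single- or multi-cacheline bursts to avoid host bridge
 * disconnects (see the comment below).
 */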
14283 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
14285 int cacheline_size;
14289 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
14291 cacheline_size = 1024;
14293 cacheline_size = (int) byte * 4;
14295 /* On 5703 and later chips, the boundary bits have no effect. */
14298 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14299 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14300 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
14303 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
14304 goal = BOUNDARY_MULTI_CACHELINE;
14306 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
14307 goal = BOUNDARY_SINGLE_CACHELINE;
14313 if (tp->tg3_flags3 & TG3_FLG3_57765_PLUS) {
14314 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
14321 /* PCI controllers on most RISC systems tend to disconnect
14322 * when a device tries to burst across a cache-line boundary.
14323 * Therefore, letting tg3 do so just wastes PCI bandwidth.
14325 * Unfortunately, for PCI-E there are only limited
14326 * write-side controls for this, and thus for reads
14327 * we will still get the disconnects. We'll also waste
14328 * these PCI cycles for both read and write for chips
14329 * other than 5700 and 5701 which do not implement the boundary bits. */
14332 if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
14333 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
14334 switch (cacheline_size) {
14339 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14340 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
14341 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
14343 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14344 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14349 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
14350 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
14354 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14355 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14358 } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
14359 switch (cacheline_size) {
14363 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14364 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14365 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
14371 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14372 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
14376 switch (cacheline_size) {
14378 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14379 val |= (DMA_RWCTRL_READ_BNDRY_16 |
14380 DMA_RWCTRL_WRITE_BNDRY_16);
14385 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14386 val |= (DMA_RWCTRL_READ_BNDRY_32 |
14387 DMA_RWCTRL_WRITE_BNDRY_32);
14392 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14393 val |= (DMA_RWCTRL_READ_BNDRY_64 |
14394 DMA_RWCTRL_WRITE_BNDRY_64);
14399 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14400 val |= (DMA_RWCTRL_READ_BNDRY_128 |
14401 DMA_RWCTRL_WRITE_BNDRY_128);
14406 val |= (DMA_RWCTRL_READ_BNDRY_256 |
14407 DMA_RWCTRL_WRITE_BNDRY_256);
14410 val |= (DMA_RWCTRL_READ_BNDRY_512 |
14411 DMA_RWCTRL_WRITE_BNDRY_512);
14415 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
14416 DMA_RWCTRL_WRITE_BNDRY_1024);
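/* Push one DMA transaction through the NIC's internal descriptor
 * pool: build a test descriptor in SRAM via the config-space
 * memory window, kick the read or write DMA engine (to_device
 * selects the direction), then poll the completion FIFO.
 */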
14425 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
14427 struct tg3_internal_buffer_desc test_desc;
14428 u32 sram_dma_descs;
14431 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
14433 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
14434 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
14435 tw32(RDMAC_STATUS, 0);
14436 tw32(WDMAC_STATUS, 0);
14438 tw32(BUFMGR_MODE, 0);
14439 tw32(FTQ_RESET, 0);
14441 test_desc.addr_hi = ((u64) buf_dma) >> 32;
14442 test_desc.addr_lo = buf_dma & 0xffffffff;
14443 test_desc.nic_mbuf = 0x00002100;
14444 test_desc.len = size;
14447 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
14448 * the *second* time the tg3 driver was getting loaded after an initial scan.
14451 * Broadcom tells me:
14452 * ...the DMA engine is connected to the GRC block and a DMA
14453 * reset may affect the GRC block in some unpredictable way...
14454 * The behavior of resets to individual blocks has not been tested.
14456 * Broadcom noted the GRC reset will also reset all sub-components.
14459 test_desc.cqid_sqid = (13 << 8) | 2;
14461 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
14464 test_desc.cqid_sqid = (16 << 8) | 7;
14466 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
14469 test_desc.flags = 0x00000005;
14471 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
14474 val = *(((u32 *)&test_desc) + i);
14475 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
14476 sram_dma_descs + (i * sizeof(u32)));
14477 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
14479 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
14482 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
14484 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
14487 for (i = 0; i < 40; i++) {
14491 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
14493 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
14494 if ((val & 0xffff) == sram_dma_descs) {
14505 #define TEST_BUFFER_SIZE 0x2000
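/* Hosts known to expose the 5700/5701 write DMA bug even when the
 * DMA test below passes; force the 16-byte write boundary on them
 * (see tg3_test_dma()).
 */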
14507 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
14508 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
14512 static int __devinit tg3_test_dma(struct tg3 *tp)
14514 dma_addr_t buf_dma;
14515 u32 *buf, saved_dma_rwctrl;
14518 buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
14519 &buf_dma, GFP_KERNEL);
14525 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
14526 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
14528 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
14530 if (tp->tg3_flags3 & TG3_FLG3_57765_PLUS)
14533 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
14534 /* DMA read watermark not used on PCIE */
14535 tp->dma_rwctrl |= 0x00180000;
14536 } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
14537 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
14538 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
14539 tp->dma_rwctrl |= 0x003f0000;
14541 tp->dma_rwctrl |= 0x003f000f;
14543 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
14544 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
14545 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
14546 u32 read_water = 0x7;
14548 /* If the 5704 is behind the EPB bridge, we can
14549 * do the less restrictive ONE_DMA workaround for
14550 * better performance.
14552 if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
14553 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
14554 tp->dma_rwctrl |= 0x8000;
14555 else if (ccval == 0x6 || ccval == 0x7)
14556 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
14558 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
14560 /* Set bit 23 to enable PCIX hw bug fix */
14562 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
14563 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
14565 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
14566 /* 5780 always in PCIX mode */
14567 tp->dma_rwctrl |= 0x00144000;
14568 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
14569 /* 5714 always in PCIX mode */
14570 tp->dma_rwctrl |= 0x00148000;
14572 tp->dma_rwctrl |= 0x001b000f;
14576 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
14577 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
14578 tp->dma_rwctrl &= 0xfffffff0;
14580 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14581 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
14582 /* Remove this if it causes problems for some boards. */
14583 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
14585 /* On 5700/5701 chips, we need to set this bit.
14586 * Otherwise the chip will issue cacheline transactions
14587 * to streamable DMA memory with not all the byte
14588 * enables turned on. This is an error on several
14589 * RISC PCI controllers, in particular sparc64.
14591 * On 5703/5704 chips, this bit has been reassigned
14592 * a different meaning. In particular, it is used
14593 * on those chips to enable a PCI-X workaround.
14595 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
14598 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14601 /* Unneeded, already done by tg3_get_invariants. */
14602 tg3_switch_clocks(tp);
14605 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14606 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
14609 /* It is best to perform DMA test with maximum write burst size
14610 * to expose the 5700/5701 write DMA bug.
14612 saved_dma_rwctrl = tp->dma_rwctrl;
14613 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14614 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14619 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
14622 /* Send the buffer to the chip. */
14623 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
14625 dev_err(&tp->pdev->dev,
14626 "%s: Buffer write failed. err = %d\n",
14632 /* Validate that the data reached card RAM correctly. */
14633 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
14635 tg3_read_mem(tp, 0x2100 + (i*4), &val);
14636 if (le32_to_cpu(val) != p[i]) {
14637 dev_err(&tp->pdev->dev,
14638 "%s: Buffer corrupted on device! "
14639 "(%d != %d)\n", __func__, val, i);
14640 /* ret = -ENODEV here? */
14645 /* Now read it back. */
14646 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
14648 dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
14649 "err = %d\n", __func__, ret);
14654 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
14658 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
14659 DMA_RWCTRL_WRITE_BNDRY_16) {
14660 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14661 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
14662 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14665 dev_err(&tp->pdev->dev,
14666 "%s: Buffer corrupted on read back! "
14667 "(%d != %d)\n", __func__, p[i], i);
14673 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
14679 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
14680 DMA_RWCTRL_WRITE_BNDRY_16) {
14682 /* DMA test passed without adjusting DMA boundary,
14683 * now look for chipsets that are known to expose the
14684 * DMA bug without failing the test.
14686 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
14687 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14688 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
14690 /* Safe to use the calculated DMA boundary. */
14691 tp->dma_rwctrl = saved_dma_rwctrl;
14694 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14698 dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
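/* Pick buffer manager mbuf watermarks for the chip family, with
 * separate standard and jumbo frame values; 5906 and 57765-class
 * parts use their own MAC RX thresholds.
 */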
14703 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
14705 if (tp->tg3_flags3 & TG3_FLG3_57765_PLUS) {
14706 tp->bufmgr_config.mbuf_read_dma_low_water =
14707 DEFAULT_MB_RDMA_LOW_WATER_5705;
14708 tp->bufmgr_config.mbuf_mac_rx_low_water =
14709 DEFAULT_MB_MACRX_LOW_WATER_57765;
14710 tp->bufmgr_config.mbuf_high_water =
14711 DEFAULT_MB_HIGH_WATER_57765;
14713 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14714 DEFAULT_MB_RDMA_LOW_WATER_5705;
14715 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14716 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
14717 tp->bufmgr_config.mbuf_high_water_jumbo =
14718 DEFAULT_MB_HIGH_WATER_JUMBO_57765;
14719 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
14720 tp->bufmgr_config.mbuf_read_dma_low_water =
14721 DEFAULT_MB_RDMA_LOW_WATER_5705;
14722 tp->bufmgr_config.mbuf_mac_rx_low_water =
14723 DEFAULT_MB_MACRX_LOW_WATER_5705;
14724 tp->bufmgr_config.mbuf_high_water =
14725 DEFAULT_MB_HIGH_WATER_5705;
14726 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14727 tp->bufmgr_config.mbuf_mac_rx_low_water =
14728 DEFAULT_MB_MACRX_LOW_WATER_5906;
14729 tp->bufmgr_config.mbuf_high_water =
14730 DEFAULT_MB_HIGH_WATER_5906;
14733 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14734 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
14735 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14736 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
14737 tp->bufmgr_config.mbuf_high_water_jumbo =
14738 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
14740 tp->bufmgr_config.mbuf_read_dma_low_water =
14741 DEFAULT_MB_RDMA_LOW_WATER;
14742 tp->bufmgr_config.mbuf_mac_rx_low_water =
14743 DEFAULT_MB_MACRX_LOW_WATER;
14744 tp->bufmgr_config.mbuf_high_water =
14745 DEFAULT_MB_HIGH_WATER;
14747 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14748 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
14749 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14750 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
14751 tp->bufmgr_config.mbuf_high_water_jumbo =
14752 DEFAULT_MB_HIGH_WATER_JUMBO;
14755 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
14756 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
14759 static char * __devinit tg3_phy_string(struct tg3 *tp)
14761 switch (tp->phy_id & TG3_PHY_ID_MASK) {
14762 case TG3_PHY_ID_BCM5400: return "5400";
14763 case TG3_PHY_ID_BCM5401: return "5401";
14764 case TG3_PHY_ID_BCM5411: return "5411";
14765 case TG3_PHY_ID_BCM5701: return "5701";
14766 case TG3_PHY_ID_BCM5703: return "5703";
14767 case TG3_PHY_ID_BCM5704: return "5704";
14768 case TG3_PHY_ID_BCM5705: return "5705";
14769 case TG3_PHY_ID_BCM5750: return "5750";
14770 case TG3_PHY_ID_BCM5752: return "5752";
14771 case TG3_PHY_ID_BCM5714: return "5714";
14772 case TG3_PHY_ID_BCM5780: return "5780";
14773 case TG3_PHY_ID_BCM5755: return "5755";
14774 case TG3_PHY_ID_BCM5787: return "5787";
14775 case TG3_PHY_ID_BCM5784: return "5784";
14776 case TG3_PHY_ID_BCM5756: return "5722/5756";
14777 case TG3_PHY_ID_BCM5906: return "5906";
14778 case TG3_PHY_ID_BCM5761: return "5761";
14779 case TG3_PHY_ID_BCM5718C: return "5718C";
14780 case TG3_PHY_ID_BCM5718S: return "5718S";
14781 case TG3_PHY_ID_BCM57765: return "57765";
14782 case TG3_PHY_ID_BCM5719C: return "5719C";
14783 case TG3_PHY_ID_BCM5720C: return "5720C";
14784 case TG3_PHY_ID_BCM8002: return "8002/serdes";
14785 case 0: return "serdes";
14786 default: return "unknown";
14790 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
14792 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
14793 strcpy(str, "PCI Express");
14795 } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
14796 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
14798 strcpy(str, "PCIX:");
14800 if ((clock_ctrl == 7) ||
14801 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
14802 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
14803 strcat(str, "133MHz");
14804 else if (clock_ctrl == 0)
14805 strcat(str, "33MHz");
14806 else if (clock_ctrl == 2)
14807 strcat(str, "50MHz");
14808 else if (clock_ctrl == 4)
14809 strcat(str, "66MHz");
14810 else if (clock_ctrl == 6)
14811 strcat(str, "100MHz");
14813 strcpy(str, "PCI:");
14814 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
14815 strcat(str, "66MHz");
14817 strcat(str, "33MHz");
14819 if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
14820 strcat(str, ":32-bit");
14822 strcat(str, ":64-bit");
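/* For dual-port devices (5704 and friends), find the pci_dev of
 * the other function in the same slot.
 */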
14826 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
14828 struct pci_dev *peer;
14829 unsigned int func, devnr = tp->pdev->devfn & ~7;
14831 for (func = 0; func < 8; func++) {
14832 peer = pci_get_slot(tp->pdev->bus, devnr | func);
14833 if (peer && peer != tp->pdev)
14837 /* 5704 can be configured in single-port mode, set peer to
14838 * tp->pdev in that case.
14846 * We don't need to keep the refcount elevated; there's no way
14847 * to remove one half of this device without removing the other
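/* Install default interrupt coalescing parameters; 5705 and newer
 * chips do not support the per-IRQ and statistics coalescing
 * controls, so those are zeroed.
 */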
14854 static void __devinit tg3_init_coal(struct tg3 *tp)
14856 struct ethtool_coalesce *ec = &tp->coal;
14858 memset(ec, 0, sizeof(*ec));
14859 ec->cmd = ETHTOOL_GCOALESCE;
14860 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
14861 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
14862 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
14863 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
14864 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
14865 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
14866 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
14867 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
14868 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
14870 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
14871 HOSTCC_MODE_CLRTICK_TXBD)) {
14872 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
14873 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
14874 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
14875 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
14878 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
14879 ec->rx_coalesce_usecs_irq = 0;
14880 ec->tx_coalesce_usecs_irq = 0;
14881 ec->stats_block_coalesce_usecs = 0;
14885 static const struct net_device_ops tg3_netdev_ops = {
14886 .ndo_open = tg3_open,
14887 .ndo_stop = tg3_close,
14888 .ndo_start_xmit = tg3_start_xmit,
14889 .ndo_get_stats64 = tg3_get_stats64,
14890 .ndo_validate_addr = eth_validate_addr,
14891 .ndo_set_multicast_list = tg3_set_rx_mode,
14892 .ndo_set_mac_address = tg3_set_mac_addr,
14893 .ndo_do_ioctl = tg3_ioctl,
14894 .ndo_tx_timeout = tg3_tx_timeout,
14895 .ndo_change_mtu = tg3_change_mtu,
14896 .ndo_fix_features = tg3_fix_features,
14897 #ifdef CONFIG_NET_POLL_CONTROLLER
14898 .ndo_poll_controller = tg3_poll_controller,
14902 static const struct net_device_ops tg3_netdev_ops_dma_bug = {
14903 .ndo_open = tg3_open,
14904 .ndo_stop = tg3_close,
14905 .ndo_start_xmit = tg3_start_xmit_dma_bug,
14906 .ndo_get_stats64 = tg3_get_stats64,
14907 .ndo_validate_addr = eth_validate_addr,
14908 .ndo_set_multicast_list = tg3_set_rx_mode,
14909 .ndo_set_mac_address = tg3_set_mac_addr,
14910 .ndo_do_ioctl = tg3_ioctl,
14911 .ndo_tx_timeout = tg3_tx_timeout,
14912 .ndo_change_mtu = tg3_change_mtu,
14913 #ifdef CONFIG_NET_POLL_CONTROLLER
14914 .ndo_poll_controller = tg3_poll_controller,
14918 static int __devinit tg3_init_one(struct pci_dev *pdev,
14919 const struct pci_device_id *ent)
14921 struct net_device *dev;
14923 int i, err, pm_cap;
14924 u32 sndmbx, rcvmbx, intmbx;
14926 u64 dma_mask, persist_dma_mask;
14927 u32 hw_features = 0;
14929 printk_once(KERN_INFO "%s\n", version);
14931 err = pci_enable_device(pdev);
14933 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
14937 err = pci_request_regions(pdev, DRV_MODULE_NAME);
14939 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
14940 goto err_out_disable_pdev;
14943 pci_set_master(pdev);
14945 /* Find power-management capability. */
14946 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
14948 dev_err(&pdev->dev,
14949 "Cannot find Power Management capability, aborting\n");
14951 goto err_out_free_res;
14954 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
14956 dev_err(&pdev->dev, "Etherdev alloc failed, aborting\n");
14958 goto err_out_free_res;
14961 SET_NETDEV_DEV(dev, &pdev->dev);
14963 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
14965 tp = netdev_priv(dev);
14968 tp->pm_cap = pm_cap;
14969 tp->rx_mode = TG3_DEF_RX_MODE;
14970 tp->tx_mode = TG3_DEF_TX_MODE;
14973 tp->msg_enable = tg3_debug;
14975 tp->msg_enable = TG3_DEF_MSG_ENABLE;
14977 /* The word/byte swap controls here control register access byte
14978 * swapping. DMA data byte swapping is controlled in the GRC_MODE
14981 tp->misc_host_ctrl =
14982 MISC_HOST_CTRL_MASK_PCI_INT |
14983 MISC_HOST_CTRL_WORD_SWAP |
14984 MISC_HOST_CTRL_INDIR_ACCESS |
14985 MISC_HOST_CTRL_PCISTATE_RW;
14987 /* The NONFRM (non-frame) byte/word swap controls take effect
14988 * on descriptor entries, anything which isn't packet data.
14990 * The StrongARM chips on the board (one for tx, one for rx)
14991 * are running in big-endian mode.
14993 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
14994 GRC_MODE_WSWAP_NONFRM_DATA);
14995 #ifdef __BIG_ENDIAN
14996 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
14998 spin_lock_init(&tp->lock);
14999 spin_lock_init(&tp->indirect_lock);
15000 INIT_WORK(&tp->reset_task, tg3_reset_task);
15002 tp->regs = pci_ioremap_bar(pdev, BAR_0);
15004 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
15006 goto err_out_free_dev;
15009 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
15010 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
15012 dev->ethtool_ops = &tg3_ethtool_ops;
15013 dev->watchdog_timeo = TG3_TX_TIMEOUT;
15014 dev->irq = pdev->irq;
15016 err = tg3_get_invariants(tp);
15018 dev_err(&pdev->dev,
15019 "Problem fetching invariants of chip, aborting\n");
15020 goto err_out_iounmap;
15023 if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) &&
15024 !(tp->tg3_flags3 & TG3_FLG3_5717_PLUS))
15025 dev->netdev_ops = &tg3_netdev_ops;
15027 dev->netdev_ops = &tg3_netdev_ops_dma_bug;
15030 /* The EPB bridge inside 5714, 5715, and 5780 and any
15031 * device behind the EPB cannot support DMA addresses > 40-bit.
15032 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
15033 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
15034 * do DMA address check in tg3_start_xmit().
15036 if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
15037 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
15038 else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
15039 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
15040 #ifdef CONFIG_HIGHMEM
15041 dma_mask = DMA_BIT_MASK(64);
15044 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
15046 /* Configure DMA attributes. */
15047 if (dma_mask > DMA_BIT_MASK(32)) {
15048 err = pci_set_dma_mask(pdev, dma_mask);
15050 dev->features |= NETIF_F_HIGHDMA;
15051 err = pci_set_consistent_dma_mask(pdev,
15054 dev_err(&pdev->dev, "Unable to obtain 64 bit "
15055 "DMA for consistent allocations\n");
15056 goto err_out_iounmap;
15060 if (err || dma_mask == DMA_BIT_MASK(32)) {
15061 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
15063 dev_err(&pdev->dev,
15064 "No usable DMA configuration, aborting\n");
15065 goto err_out_iounmap;
15069 tg3_init_bufmgr_config(tp);
15071 /* Selectively allow TSO based on operating conditions */
15072 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
15073 (tp->fw_needed && !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)))
15074 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
15076 tp->tg3_flags2 &= ~(TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG);
15077 tp->fw_needed = NULL;
15080 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
15081 tp->fw_needed = FIRMWARE_TG3;
15083 /* TSO is on by default on chips that support hardware TSO.
15084 * Firmware TSO on older chips gives lower performance, so it
15085 * is off by default, but can be enabled using ethtool.
15087 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) &&
15088 (dev->features & NETIF_F_IP_CSUM))
15089 hw_features |= NETIF_F_TSO;
15090 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) ||
15091 (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3)) {
15092 if (dev->features & NETIF_F_IPV6_CSUM)
15093 hw_features |= NETIF_F_TSO6;
15094 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) ||
15095 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
15096 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
15097 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
15098 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
15099 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
15100 hw_features |= NETIF_F_TSO_ECN;
15103 dev->hw_features |= hw_features;
15104 dev->features |= hw_features;
15105 dev->vlan_features |= hw_features;
15107 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
15108 !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
15109 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
15110 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
15111 tp->rx_pending = 63;
15114 err = tg3_get_device_address(tp);
15116 dev_err(&pdev->dev,
15117 "Could not obtain valid ethernet address, aborting\n");
15118 goto err_out_iounmap;
15121 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
15122 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
15123 if (!tp->aperegs) {
15124 dev_err(&pdev->dev,
15125 "Cannot map APE registers, aborting\n");
15127 goto err_out_iounmap;
15130 tg3_ape_lock_init(tp);
15132 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
15133 tg3_read_dash_ver(tp);
15137 * Reset the chip in case the UNDI or EFI driver did not shut it
15138 * down. The DMA self test will enable WDMAC and we'll see (spurious)
15139 * pending DMA on the PCI bus at that point.
15141 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
15142 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
15143 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
15144 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15147 err = tg3_test_dma(tp);
15149 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
15150 goto err_out_apeunmap;
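/* Hand each NAPI context its interrupt, RX-return consumer and TX
 * producer mailbox registers. Vector 0 keeps the legacy mailboxes;
 * when MSI-X/RSS is in use the remaining vectors step through the
 * per-vector mailbox ranges set up here.
 */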
15153 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
15154 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
15155 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
15156 for (i = 0; i < tp->irq_max; i++) {
15157 struct tg3_napi *tnapi = &tp->napi[i];
15160 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
15162 tnapi->int_mbox = intmbx;
15168 tnapi->consmbox = rcvmbx;
15169 tnapi->prodmbox = sndmbx;
15172 tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
15174 tnapi->coal_now = HOSTCC_MODE_NOW;
15176 if (!(tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX))
15180 * If we support MSIX, we'll be using RSS. If we're using
15181 * RSS, the first vector only handles link interrupts and the
15182 * remaining vectors handle rx and tx interrupts. Reuse the
15183 * mailbox values for the next iteration. The values we set up
15184 * above are still useful for the single vectored mode.
15199 pci_set_drvdata(pdev, dev);
15201 err = register_netdev(dev);
15203 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
15204 goto err_out_apeunmap;
15207 netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
15208 tp->board_part_number,
15209 tp->pci_chip_rev_id,
15210 tg3_bus_string(tp, str),
15213 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
15214 struct phy_device *phydev;
15215 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
15217 "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
15218 phydev->drv->name, dev_name(&phydev->dev));
15222 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
15223 ethtype = "10/100Base-TX";
15224 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
15225 ethtype = "1000Base-SX";
15227 ethtype = "10/100/1000Base-T";
15229 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
15230 "(WireSpeed[%d])\n", tg3_phy_string(tp), ethtype,
15231 (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0);
15234 netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
15235 (dev->features & NETIF_F_RXCSUM) != 0,
15236 (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
15237 (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
15238 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
15239 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
15240 netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
15242 pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
15243 ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
15249 iounmap(tp->aperegs);
15250 tp->aperegs = NULL;
15263 pci_release_regions(pdev);
15265 err_out_disable_pdev:
15266 pci_disable_device(pdev);
15267 pci_set_drvdata(pdev, NULL);
15271 static void __devexit tg3_remove_one(struct pci_dev *pdev)
15273 struct net_device *dev = pci_get_drvdata(pdev);
15276 struct tg3 *tp = netdev_priv(dev);
15279 release_firmware(tp->fw);
15281 cancel_work_sync(&tp->reset_task);
15283 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
15288 unregister_netdev(dev);
15290 iounmap(tp->aperegs);
15291 tp->aperegs = NULL;
15298 pci_release_regions(pdev);
15299 pci_disable_device(pdev);
15300 pci_set_drvdata(pdev, NULL);
15304 #ifdef CONFIG_PM_SLEEP
15305 static int tg3_suspend(struct device *device)
15307 struct pci_dev *pdev = to_pci_dev(device);
15308 struct net_device *dev = pci_get_drvdata(pdev);
15309 struct tg3 *tp = netdev_priv(dev);
15312 if (!netif_running(dev))
15315 flush_work_sync(&tp->reset_task);
15317 tg3_netif_stop(tp);
15319 del_timer_sync(&tp->timer);
15321 tg3_full_lock(tp, 1);
15322 tg3_disable_ints(tp);
15323 tg3_full_unlock(tp);
15325 netif_device_detach(dev);
15327 tg3_full_lock(tp, 0);
15328 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15329 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
15330 tg3_full_unlock(tp);
15332 err = tg3_power_down_prepare(tp);
15336 tg3_full_lock(tp, 0);
15338 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
15339 err2 = tg3_restart_hw(tp, 1);
15343 tp->timer.expires = jiffies + tp->timer_offset;
15344 add_timer(&tp->timer);
15346 netif_device_attach(dev);
15347 tg3_netif_start(tp);
15350 tg3_full_unlock(tp);
15359 static int tg3_resume(struct device *device)
15361 struct pci_dev *pdev = to_pci_dev(device);
15362 struct net_device *dev = pci_get_drvdata(pdev);
15363 struct tg3 *tp = netdev_priv(dev);
15366 if (!netif_running(dev))
15369 netif_device_attach(dev);
15371 tg3_full_lock(tp, 0);
15373 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
15374 err = tg3_restart_hw(tp, 1);
15378 tp->timer.expires = jiffies + tp->timer_offset;
15379 add_timer(&tp->timer);
15381 tg3_netif_start(tp);
15384 tg3_full_unlock(tp);
15392 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
15393 #define TG3_PM_OPS (&tg3_pm_ops)
15397 #define TG3_PM_OPS NULL
15399 #endif /* CONFIG_PM_SLEEP */
15401 static struct pci_driver tg3_driver = {
15402 .name = DRV_MODULE_NAME,
15403 .id_table = tg3_pci_tbl,
15404 .probe = tg3_init_one,
15405 .remove = __devexit_p(tg3_remove_one),
15406 .driver.pm = TG3_PM_OPS,
15409 static int __init tg3_init(void)
15411 return pci_register_driver(&tg3_driver);
15414 static void __exit tg3_cleanup(void)
15416 pci_unregister_driver(&tg3_driver);
15419 module_init(tg3_init);
15420 module_exit(tg3_cleanup);