2 * tg3.c: Broadcom Tigon3 ethernet driver.
4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6 * Copyright (C) 2004 Sun Microsystems Inc.
7 * Copyright (C) 2005-2011 Broadcom Corporation.
10 * Derived from proprietary unpublished source code,
11 * Copyright (C) 2000-2003 Broadcom Corporation.
13 * Permission is hereby granted for the distribution of this firmware
14 * data in hexadecimal or equivalent format, provided this copyright
15 * notice is accompanying it.
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/stringify.h>
22 #include <linux/kernel.h>
23 #include <linux/types.h>
24 #include <linux/compiler.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
28 #include <linux/init.h>
29 #include <linux/ioport.h>
30 #include <linux/pci.h>
31 #include <linux/netdevice.h>
32 #include <linux/etherdevice.h>
33 #include <linux/skbuff.h>
34 #include <linux/ethtool.h>
35 #include <linux/mdio.h>
36 #include <linux/mii.h>
37 #include <linux/phy.h>
38 #include <linux/brcmphy.h>
39 #include <linux/if_vlan.h>
41 #include <linux/tcp.h>
42 #include <linux/workqueue.h>
43 #include <linux/prefetch.h>
44 #include <linux/dma-mapping.h>
45 #include <linux/firmware.h>
47 #include <net/checksum.h>
50 #include <asm/system.h>
52 #include <asm/byteorder.h>
53 #include <linux/uaccess.h>
56 #include <asm/idprom.h>
65 #define DRV_MODULE_NAME "tg3"
67 #define TG3_MIN_NUM 118
68 #define DRV_MODULE_VERSION \
69 __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
70 #define DRV_MODULE_RELDATE "April 22, 2011"
72 #define TG3_DEF_MAC_MODE 0
73 #define TG3_DEF_RX_MODE 0
74 #define TG3_DEF_TX_MODE 0
75 #define TG3_DEF_MSG_ENABLE \
85 /* length of time before we decide the hardware is borked,
86 * and dev->tx_timeout() should be called to fix the problem
88 #define TG3_TX_TIMEOUT (5 * HZ)
90 /* hardware minimum and maximum for a single frame's data payload */
91 #define TG3_MIN_MTU 60
92 #define TG3_MAX_MTU(tp) \
93 ((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) ? 9000 : 1500)
95 /* These numbers seem to be hard coded in the NIC firmware somehow.
96 * You can't change the ring sizes, but you can change where you place
97 * them in the NIC onboard memory.
99 #define TG3_RX_STD_RING_SIZE(tp) \
100 ((tp->tg3_flags3 & TG3_FLG3_LRG_PROD_RING_CAP) ? \
101 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
102 #define TG3_DEF_RX_RING_PENDING 200
103 #define TG3_RX_JMB_RING_SIZE(tp) \
104 ((tp->tg3_flags3 & TG3_FLG3_LRG_PROD_RING_CAP) ? \
105 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
106 #define TG3_DEF_RX_JUMBO_RING_PENDING 100
107 #define TG3_RSS_INDIR_TBL_SIZE 128
109 /* Do not place this n-ring entries value into the tp struct itself,
110 * we really want to expose these constants to GCC so that modulo et
111 * al. operations are done with shifts and masks instead of with
112 * hw multiply/modulo instructions. Another solution would be to
113 * replace things like '% foo' with '& (foo - 1)'.
116 #define TG3_TX_RING_SIZE 512
117 #define TG3_DEF_TX_RING_PENDING (TG3_TX_RING_SIZE - 1)
119 #define TG3_RX_STD_RING_BYTES(tp) \
120 (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
121 #define TG3_RX_JMB_RING_BYTES(tp) \
122 (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
123 #define TG3_RX_RCB_RING_BYTES(tp) \
124 (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
125 #define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \
127 #define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
129 #define TG3_DMA_BYTE_ENAB 64
131 #define TG3_RX_STD_DMA_SZ 1536
132 #define TG3_RX_JMB_DMA_SZ 9046
134 #define TG3_RX_DMA_TO_MAP_SZ(x) ((x) + TG3_DMA_BYTE_ENAB)
136 #define TG3_RX_STD_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
137 #define TG3_RX_JMB_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
139 #define TG3_RX_STD_BUFF_RING_SIZE(tp) \
140 (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))
142 #define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
143 (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
145 /* Due to a hardware bug, the 5701 can only DMA to memory addresses
146 * that are at least dword aligned when used in PCIX mode. The driver
147 * works around this bug by double copying the packet. This workaround
148 * is built into the normal double copy length check for efficiency.
150 * However, the double copy is only necessary on those architectures
151 * where unaligned memory accesses are inefficient. For those architectures
152 * where unaligned memory accesses incur little penalty, we can reintegrate
153 * the 5701 in the normal rx path. Doing so saves a device structure
154 * dereference by hardcoding the double copy threshold in place.
156 #define TG3_RX_COPY_THRESHOLD 256
157 #if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
158 #define TG3_RX_COPY_THRESH(tp) TG3_RX_COPY_THRESHOLD
160 #define TG3_RX_COPY_THRESH(tp) ((tp)->rx_copy_thresh)
163 /* minimum number of free TX descriptors required to wake up TX process */
164 #define TG3_TX_WAKEUP_THRESH(tnapi) ((tnapi)->tx_pending / 4)
166 #define TG3_RAW_IP_ALIGN 2
168 #define TG3_FW_UPDATE_TIMEOUT_SEC 5
170 #define FIRMWARE_TG3 "tigon/tg3.bin"
171 #define FIRMWARE_TG3TSO "tigon/tg3_tso.bin"
172 #define FIRMWARE_TG3TSO5 "tigon/tg3_tso5.bin"
174 static char version[] __devinitdata =
175 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
177 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
178 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
179 MODULE_LICENSE("GPL");
180 MODULE_VERSION(DRV_MODULE_VERSION);
181 MODULE_FIRMWARE(FIRMWARE_TG3);
182 MODULE_FIRMWARE(FIRMWARE_TG3TSO);
183 MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
185 static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
186 module_param(tg3_debug, int, 0);
187 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
189 static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
190 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
191 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
192 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
193 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
194 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
195 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
196 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
197 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
198 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
199 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
200 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
201 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
202 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
203 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
204 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
205 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
206 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
207 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
208 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
209 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
210 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
211 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
212 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
213 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
214 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
215 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
216 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
217 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
218 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
219 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
220 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
221 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
222 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
223 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
224 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
225 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
226 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
227 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
228 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
229 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
230 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
231 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
232 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
233 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
234 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
235 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
236 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
237 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
238 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
239 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
240 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
241 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
242 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
243 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
244 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
245 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
246 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
247 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
248 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
249 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
250 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
251 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
252 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
253 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
254 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
255 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
256 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
257 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
258 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
259 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
260 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
261 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
262 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
263 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
264 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
265 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
266 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
267 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
268 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
269 {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
273 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
275 static const struct {
276 const char string[ETH_GSTRING_LEN];
277 } ethtool_stats_keys[] = {
280 { "rx_ucast_packets" },
281 { "rx_mcast_packets" },
282 { "rx_bcast_packets" },
284 { "rx_align_errors" },
285 { "rx_xon_pause_rcvd" },
286 { "rx_xoff_pause_rcvd" },
287 { "rx_mac_ctrl_rcvd" },
288 { "rx_xoff_entered" },
289 { "rx_frame_too_long_errors" },
291 { "rx_undersize_packets" },
292 { "rx_in_length_errors" },
293 { "rx_out_length_errors" },
294 { "rx_64_or_less_octet_packets" },
295 { "rx_65_to_127_octet_packets" },
296 { "rx_128_to_255_octet_packets" },
297 { "rx_256_to_511_octet_packets" },
298 { "rx_512_to_1023_octet_packets" },
299 { "rx_1024_to_1522_octet_packets" },
300 { "rx_1523_to_2047_octet_packets" },
301 { "rx_2048_to_4095_octet_packets" },
302 { "rx_4096_to_8191_octet_packets" },
303 { "rx_8192_to_9022_octet_packets" },
310 { "tx_flow_control" },
312 { "tx_single_collisions" },
313 { "tx_mult_collisions" },
315 { "tx_excessive_collisions" },
316 { "tx_late_collisions" },
317 { "tx_collide_2times" },
318 { "tx_collide_3times" },
319 { "tx_collide_4times" },
320 { "tx_collide_5times" },
321 { "tx_collide_6times" },
322 { "tx_collide_7times" },
323 { "tx_collide_8times" },
324 { "tx_collide_9times" },
325 { "tx_collide_10times" },
326 { "tx_collide_11times" },
327 { "tx_collide_12times" },
328 { "tx_collide_13times" },
329 { "tx_collide_14times" },
330 { "tx_collide_15times" },
331 { "tx_ucast_packets" },
332 { "tx_mcast_packets" },
333 { "tx_bcast_packets" },
334 { "tx_carrier_sense_errors" },
338 { "dma_writeq_full" },
339 { "dma_write_prioq_full" },
342 { "mbuf_lwm_thresh_hit" },
344 { "rx_threshold_hit" },
346 { "dma_readq_full" },
347 { "dma_read_prioq_full" },
348 { "tx_comp_queue_full" },
350 { "ring_set_send_prod_index" },
351 { "ring_status_update" },
353 { "nic_avoided_irqs" },
354 { "nic_tx_threshold_hit" }
357 #define TG3_NUM_STATS ARRAY_SIZE(ethtool_stats_keys)
360 static const struct {
361 const char string[ETH_GSTRING_LEN];
362 } ethtool_test_keys[] = {
363 { "nvram test (online) " },
364 { "link test (online) " },
365 { "register test (offline)" },
366 { "memory test (offline)" },
367 { "loopback test (offline)" },
368 { "interrupt test (offline)" },
371 #define TG3_NUM_TEST ARRAY_SIZE(ethtool_test_keys)
374 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
376 writel(val, tp->regs + off);
379 static u32 tg3_read32(struct tg3 *tp, u32 off)
381 return readl(tp->regs + off);
384 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
386 writel(val, tp->aperegs + off);
389 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
391 return readl(tp->aperegs + off);
394 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
398 spin_lock_irqsave(&tp->indirect_lock, flags);
399 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
400 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
401 spin_unlock_irqrestore(&tp->indirect_lock, flags);
404 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
406 writel(val, tp->regs + off);
407 readl(tp->regs + off);
410 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
415 spin_lock_irqsave(&tp->indirect_lock, flags);
416 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
417 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
418 spin_unlock_irqrestore(&tp->indirect_lock, flags);
422 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
426 if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
427 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
428 TG3_64BIT_REG_LOW, val);
431 if (off == TG3_RX_STD_PROD_IDX_REG) {
432 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
433 TG3_64BIT_REG_LOW, val);
437 spin_lock_irqsave(&tp->indirect_lock, flags);
438 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
439 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
440 spin_unlock_irqrestore(&tp->indirect_lock, flags);
442 /* In indirect mode when disabling interrupts, we also need
443 * to clear the interrupt bit in the GRC local ctrl register.
445 if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
447 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
448 tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
452 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
457 spin_lock_irqsave(&tp->indirect_lock, flags);
458 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
459 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
460 spin_unlock_irqrestore(&tp->indirect_lock, flags);
464 /* usec_wait specifies the wait time in usec when writing to certain registers
465 * where it is unsafe to read back the register without some delay.
466 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
467 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
469 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
471 if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
472 (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
473 /* Non-posted methods */
474 tp->write32(tp, off, val);
477 tg3_write32(tp, off, val);
482 /* Wait again after the read for the posted method to guarantee that
483 * the wait time is met.
489 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
491 tp->write32_mbox(tp, off, val);
492 if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
493 !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
494 tp->read32_mbox(tp, off);
497 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
499 void __iomem *mbox = tp->regs + off;
501 if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
503 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
507 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
509 return readl(tp->regs + off + GRCMBOX_BASE);
512 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
514 writel(val, tp->regs + off + GRCMBOX_BASE);
517 #define tw32_mailbox(reg, val) tp->write32_mbox(tp, reg, val)
518 #define tw32_mailbox_f(reg, val) tw32_mailbox_flush(tp, (reg), (val))
519 #define tw32_rx_mbox(reg, val) tp->write32_rx_mbox(tp, reg, val)
520 #define tw32_tx_mbox(reg, val) tp->write32_tx_mbox(tp, reg, val)
521 #define tr32_mailbox(reg) tp->read32_mbox(tp, reg)
523 #define tw32(reg, val) tp->write32(tp, reg, val)
524 #define tw32_f(reg, val) _tw32_flush(tp, (reg), (val), 0)
525 #define tw32_wait_f(reg, val, us) _tw32_flush(tp, (reg), (val), (us))
526 #define tr32(reg) tp->read32(tp, reg)
528 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
532 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
533 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
536 spin_lock_irqsave(&tp->indirect_lock, flags);
537 if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
538 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
539 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
541 /* Always leave this as zero. */
542 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
544 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
545 tw32_f(TG3PCI_MEM_WIN_DATA, val);
547 /* Always leave this as zero. */
548 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
550 spin_unlock_irqrestore(&tp->indirect_lock, flags);
553 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
557 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
558 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
563 spin_lock_irqsave(&tp->indirect_lock, flags);
564 if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
565 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
566 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
568 /* Always leave this as zero. */
569 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
571 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
572 *val = tr32(TG3PCI_MEM_WIN_DATA);
574 /* Always leave this as zero. */
575 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
577 spin_unlock_irqrestore(&tp->indirect_lock, flags);
580 static void tg3_ape_lock_init(struct tg3 *tp)
585 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
586 regbase = TG3_APE_LOCK_GRANT;
588 regbase = TG3_APE_PER_LOCK_GRANT;
590 /* Make sure the driver hasn't any stale locks. */
591 for (i = 0; i < 8; i++)
592 tg3_ape_write32(tp, regbase + 4 * i, APE_LOCK_GRANT_DRIVER);
595 static int tg3_ape_lock(struct tg3 *tp, int locknum)
599 u32 status, req, gnt;
601 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
605 case TG3_APE_LOCK_GRC:
606 case TG3_APE_LOCK_MEM:
612 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
613 req = TG3_APE_LOCK_REQ;
614 gnt = TG3_APE_LOCK_GRANT;
616 req = TG3_APE_PER_LOCK_REQ;
617 gnt = TG3_APE_PER_LOCK_GRANT;
622 tg3_ape_write32(tp, req + off, APE_LOCK_REQ_DRIVER);
624 /* Wait for up to 1 millisecond to acquire lock. */
625 for (i = 0; i < 100; i++) {
626 status = tg3_ape_read32(tp, gnt + off);
627 if (status == APE_LOCK_GRANT_DRIVER)
632 if (status != APE_LOCK_GRANT_DRIVER) {
633 /* Revoke the lock request. */
634 tg3_ape_write32(tp, gnt + off,
635 APE_LOCK_GRANT_DRIVER);
643 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
647 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
651 case TG3_APE_LOCK_GRC:
652 case TG3_APE_LOCK_MEM:
658 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
659 gnt = TG3_APE_LOCK_GRANT;
661 gnt = TG3_APE_PER_LOCK_GRANT;
663 tg3_ape_write32(tp, gnt + 4 * locknum, APE_LOCK_GRANT_DRIVER);
666 static void tg3_disable_ints(struct tg3 *tp)
670 tw32(TG3PCI_MISC_HOST_CTRL,
671 (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
672 for (i = 0; i < tp->irq_max; i++)
673 tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
676 static void tg3_enable_ints(struct tg3 *tp)
683 tw32(TG3PCI_MISC_HOST_CTRL,
684 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
686 tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
687 for (i = 0; i < tp->irq_cnt; i++) {
688 struct tg3_napi *tnapi = &tp->napi[i];
690 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
691 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
692 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
694 tp->coal_now |= tnapi->coal_now;
697 /* Force an initial interrupt */
698 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
699 (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
700 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
702 tw32(HOSTCC_MODE, tp->coal_now);
704 tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
707 static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
709 struct tg3 *tp = tnapi->tp;
710 struct tg3_hw_status *sblk = tnapi->hw_status;
711 unsigned int work_exists = 0;
713 /* check for phy events */
714 if (!(tp->tg3_flags &
715 (TG3_FLAG_USE_LINKCHG_REG |
716 TG3_FLAG_POLL_SERDES))) {
717 if (sblk->status & SD_STATUS_LINK_CHG)
720 /* check for RX/TX work to do */
721 if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
722 *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
729 * similar to tg3_enable_ints, but it accurately determines whether there
730 * is new work pending and can return without flushing the PIO write
731 * which reenables interrupts
733 static void tg3_int_reenable(struct tg3_napi *tnapi)
735 struct tg3 *tp = tnapi->tp;
737 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
740 /* When doing tagged status, this work check is unnecessary.
741 * The last_tag we write above tells the chip which piece of
742 * work we've completed.
744 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
746 tw32(HOSTCC_MODE, tp->coalesce_mode |
747 HOSTCC_MODE_ENABLE | tnapi->coal_now);
750 static void tg3_switch_clocks(struct tg3 *tp)
755 if ((tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
756 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
759 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
761 orig_clock_ctrl = clock_ctrl;
762 clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
763 CLOCK_CTRL_CLKRUN_OENABLE |
765 tp->pci_clock_ctrl = clock_ctrl;
767 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
768 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
769 tw32_wait_f(TG3PCI_CLOCK_CTRL,
770 clock_ctrl | CLOCK_CTRL_625_CORE, 40);
772 } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
773 tw32_wait_f(TG3PCI_CLOCK_CTRL,
775 (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
777 tw32_wait_f(TG3PCI_CLOCK_CTRL,
778 clock_ctrl | (CLOCK_CTRL_ALTCLK),
781 tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
784 #define PHY_BUSY_LOOPS 5000
786 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
792 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
794 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
800 frame_val = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
801 MI_COM_PHY_ADDR_MASK);
802 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
803 MI_COM_REG_ADDR_MASK);
804 frame_val |= (MI_COM_CMD_READ | MI_COM_START);
806 tw32_f(MAC_MI_COM, frame_val);
808 loops = PHY_BUSY_LOOPS;
811 frame_val = tr32(MAC_MI_COM);
813 if ((frame_val & MI_COM_BUSY) == 0) {
815 frame_val = tr32(MAC_MI_COM);
823 *val = frame_val & MI_COM_DATA_MASK;
827 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
828 tw32_f(MAC_MI_MODE, tp->mi_mode);
835 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
841 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
842 (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
845 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
847 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
851 frame_val = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
852 MI_COM_PHY_ADDR_MASK);
853 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
854 MI_COM_REG_ADDR_MASK);
855 frame_val |= (val & MI_COM_DATA_MASK);
856 frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
858 tw32_f(MAC_MI_COM, frame_val);
860 loops = PHY_BUSY_LOOPS;
863 frame_val = tr32(MAC_MI_COM);
864 if ((frame_val & MI_COM_BUSY) == 0) {
866 frame_val = tr32(MAC_MI_COM);
876 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
877 tw32_f(MAC_MI_MODE, tp->mi_mode);
884 static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
888 err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
892 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
896 err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
897 MII_TG3_MMD_CTRL_DATA_NOINC | devad);
901 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
907 static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
911 err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
915 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
919 err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
920 MII_TG3_MMD_CTRL_DATA_NOINC | devad);
924 err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
930 static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
934 err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
936 err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
941 static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
945 err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
947 err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
952 static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
956 err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
957 (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
958 MII_TG3_AUXCTL_SHDWSEL_MISC);
960 err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
965 static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
967 if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
968 set |= MII_TG3_AUXCTL_MISC_WREN;
970 return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
973 #define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
974 tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
975 MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
976 MII_TG3_AUXCTL_ACTL_TX_6DB)
978 #define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
979 tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
980 MII_TG3_AUXCTL_ACTL_TX_6DB);
982 static int tg3_bmcr_reset(struct tg3 *tp)
987 /* OK, reset it, and poll the BMCR_RESET bit until it
988 * clears or we time out.
990 phy_control = BMCR_RESET;
991 err = tg3_writephy(tp, MII_BMCR, phy_control);
997 err = tg3_readphy(tp, MII_BMCR, &phy_control);
1001 if ((phy_control & BMCR_RESET) == 0) {
1013 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
1015 struct tg3 *tp = bp->priv;
1018 spin_lock_bh(&tp->lock);
1020 if (tg3_readphy(tp, reg, &val))
1023 spin_unlock_bh(&tp->lock);
1028 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1030 struct tg3 *tp = bp->priv;
1033 spin_lock_bh(&tp->lock);
1035 if (tg3_writephy(tp, reg, val))
1038 spin_unlock_bh(&tp->lock);
1043 static int tg3_mdio_reset(struct mii_bus *bp)
1048 static void tg3_mdio_config_5785(struct tg3 *tp)
1051 struct phy_device *phydev;
1053 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1054 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1055 case PHY_ID_BCM50610:
1056 case PHY_ID_BCM50610M:
1057 val = MAC_PHYCFG2_50610_LED_MODES;
1059 case PHY_ID_BCMAC131:
1060 val = MAC_PHYCFG2_AC131_LED_MODES;
1062 case PHY_ID_RTL8211C:
1063 val = MAC_PHYCFG2_RTL8211C_LED_MODES;
1065 case PHY_ID_RTL8201E:
1066 val = MAC_PHYCFG2_RTL8201E_LED_MODES;
1072 if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
1073 tw32(MAC_PHYCFG2, val);
1075 val = tr32(MAC_PHYCFG1);
1076 val &= ~(MAC_PHYCFG1_RGMII_INT |
1077 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
1078 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
1079 tw32(MAC_PHYCFG1, val);
1084 if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_INBAND_DISABLE))
1085 val |= MAC_PHYCFG2_EMODE_MASK_MASK |
1086 MAC_PHYCFG2_FMODE_MASK_MASK |
1087 MAC_PHYCFG2_GMODE_MASK_MASK |
1088 MAC_PHYCFG2_ACT_MASK_MASK |
1089 MAC_PHYCFG2_QUAL_MASK_MASK |
1090 MAC_PHYCFG2_INBAND_ENABLE;
1092 tw32(MAC_PHYCFG2, val);
1094 val = tr32(MAC_PHYCFG1);
1095 val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
1096 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
1097 if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_INBAND_DISABLE)) {
1098 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
1099 val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
1100 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
1101 val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
1103 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
1104 MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
1105 tw32(MAC_PHYCFG1, val);
1107 val = tr32(MAC_EXT_RGMII_MODE);
1108 val &= ~(MAC_RGMII_MODE_RX_INT_B |
1109 MAC_RGMII_MODE_RX_QUALITY |
1110 MAC_RGMII_MODE_RX_ACTIVITY |
1111 MAC_RGMII_MODE_RX_ENG_DET |
1112 MAC_RGMII_MODE_TX_ENABLE |
1113 MAC_RGMII_MODE_TX_LOWPWR |
1114 MAC_RGMII_MODE_TX_RESET);
1115 if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_INBAND_DISABLE)) {
1116 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
1117 val |= MAC_RGMII_MODE_RX_INT_B |
1118 MAC_RGMII_MODE_RX_QUALITY |
1119 MAC_RGMII_MODE_RX_ACTIVITY |
1120 MAC_RGMII_MODE_RX_ENG_DET;
1121 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
1122 val |= MAC_RGMII_MODE_TX_ENABLE |
1123 MAC_RGMII_MODE_TX_LOWPWR |
1124 MAC_RGMII_MODE_TX_RESET;
1126 tw32(MAC_EXT_RGMII_MODE, val);
1129 static void tg3_mdio_start(struct tg3 *tp)
1131 tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
1132 tw32_f(MAC_MI_MODE, tp->mi_mode);
1135 if ((tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) &&
1136 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
1137 tg3_mdio_config_5785(tp);
/* Allocate and register the phylib MDIO bus for this device and apply
 * per-PHY-model fixups (interface mode, RGMII/power-down dev_flags).
 * On 5717-class parts the PHY address is derived from the PCI function;
 * otherwise the fixed TG3_PHY_MII_ADDR is used.
 * FIX(review): restored "&reg" at the MII_BMCR read below -- the
 * extraction had mojibake "®" (HTML entity for "&reg"); "reg" is read
 * immediately afterwards, so address-of reg is unambiguous.
 * NOTE(review): blank/brace lines were dropped by the extraction; all
 * other code lines are byte-identical. */
1140 static int tg3_mdio_init(struct tg3 *tp)
1144 struct phy_device *phydev;
1146 if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) {
1149 tp->phy_addr = PCI_FUNC(tp->pdev->devfn) + 1;
1151 if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
1152 is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
1154 is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
1155 TG3_CPMU_PHY_STRAP_IS_SERDES;
1159 tp->phy_addr = TG3_PHY_MII_ADDR;
1163 if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) ||
1164 (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED))
1167 tp->mdio_bus = mdiobus_alloc();
1168 if (tp->mdio_bus == NULL)
1171 tp->mdio_bus->name = "tg3 mdio bus";
1172 snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
1173 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
1174 tp->mdio_bus->priv = tp;
1175 tp->mdio_bus->parent = &tp->pdev->dev;
1176 tp->mdio_bus->read = &tg3_mdio_read;
1177 tp->mdio_bus->write = &tg3_mdio_write;
1178 tp->mdio_bus->reset = &tg3_mdio_reset;
1179 tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
1180 tp->mdio_bus->irq = &tp->mdio_irq[0];
1182 for (i = 0; i < PHY_MAX_ADDR; i++)
1183 tp->mdio_bus->irq[i] = PHY_POLL;
1185 /* The bus registration will look for all the PHYs on the mdio bus.
1186 * Unfortunately, it does not ensure the PHY is powered up before
1187 * accessing the PHY ID registers. A chip reset is the
1188 * quickest way to bring the device back to an operational state..
1190 if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
1193 i = mdiobus_register(tp->mdio_bus);
1195 dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
1196 mdiobus_free(tp->mdio_bus);
1200 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1202 if (!phydev || !phydev->drv) {
1203 dev_warn(&tp->pdev->dev, "No PHY devices\n");
1204 mdiobus_unregister(tp->mdio_bus);
1205 mdiobus_free(tp->mdio_bus);
/* Per-PHY-model quirks keyed on the masked PHY ID. */
1209 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1210 case PHY_ID_BCM57780:
1211 phydev->interface = PHY_INTERFACE_MODE_GMII;
1212 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1214 case PHY_ID_BCM50610:
1215 case PHY_ID_BCM50610M:
1216 phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
1217 PHY_BRCM_RX_REFCLK_UNUSED |
1218 PHY_BRCM_DIS_TXCRXC_NOENRGY |
1219 PHY_BRCM_AUTO_PWRDWN_ENABLE;
1220 if (tp->tg3_flags3 & TG3_FLG3_RGMII_INBAND_DISABLE)
1221 phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
1222 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
1223 phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
1224 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
1225 phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
1227 case PHY_ID_RTL8211C:
1228 phydev->interface = PHY_INTERFACE_MODE_RGMII;
1230 case PHY_ID_RTL8201E:
1231 case PHY_ID_BCMAC131:
1232 phydev->interface = PHY_INTERFACE_MODE_MII;
1233 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1234 tp->phy_flags |= TG3_PHYFLG_IS_FET;
1238 tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_INITED;
1240 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
1241 tg3_mdio_config_5785(tp);
/* Tear down the MDIO bus created by tg3_mdio_init(), if it was
 * registered (flag cleared first, then unregister + free). */
1246 static void tg3_mdio_fini(struct tg3 *tp)
1248 if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
1249 tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_INITED;
1250 mdiobus_unregister(tp->mdio_bus);
1251 mdiobus_free(tp->mdio_bus);
1255 /* tp->lock is held. */
/* Raise the "driver event" bit toward the on-chip RX CPU firmware and
 * record when it was raised (used by tg3_wait_for_event_ack() to size
 * its ack-wait). */
1256 static inline void tg3_generate_fw_event(struct tg3 *tp)
1260 val = tr32(GRC_RX_CPU_EVENT);
1261 val |= GRC_RX_CPU_DRIVER_EVENT;
1262 tw32_f(GRC_RX_CPU_EVENT, val);
1264 tp->last_event_jiffies = jiffies;
1267 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1269 /* tp->lock is held. */
/* Poll until the firmware clears the previous driver event bit, bounded
 * by TG3_FW_EVENT_TIMEOUT_USEC; the wait is shortened by the time that
 * has already elapsed since tp->last_event_jiffies. */
1270 static void tg3_wait_for_event_ack(struct tg3 *tp)
1273 unsigned int delay_cnt;
1276 /* If enough time has passed, no wait is necessary. */
1277 time_remain = (long)(tp->last_event_jiffies + 1 +
1278 usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1280 if (time_remain < 0)
1283 /* Check if we can shorten the wait time. */
1284 delay_cnt = jiffies_to_usecs(time_remain)
1285 if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1286 delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
1287 delay_cnt = (delay_cnt >> 3) + 1;
1289 for (i = 0; i < delay_cnt; i++) {
1290 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1296 /* tp->lock is held. */
/* Report link-state details (BMCR/BMSR, ADVERTISE/LPA, 1000T control/
 * status, PHY address) to the ASF/UMP firmware via the SRAM mailbox,
 * then ring the firmware event doorbell. Only runs on 5780-class parts
 * with ASF enabled.
 * FIX(review): restored "&reg" in the tg3_readphy() calls below -- the
 * extraction contained mojibake "®" (HTML entity for "&reg"); each
 * read is followed by a use of "reg", so address-of reg is the only
 * sensible argument.
 * NOTE(review): blank/brace lines were dropped by the extraction; all
 * other code lines are byte-identical. */
1297 static void tg3_ump_link_report(struct tg3 *tp)
1302 if (!(tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
1303 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
1306 tg3_wait_for_event_ack(tp);
1308 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1310 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1313 if (!tg3_readphy(tp, MII_BMCR, &reg))
1315 if (!tg3_readphy(tp, MII_BMSR, &reg))
1316 val |= (reg & 0xffff);
1317 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);
1320 if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1322 if (!tg3_readphy(tp, MII_LPA, &reg))
1323 val |= (reg & 0xffff);
1324 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);
1327 if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1328 if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1330 if (!tg3_readphy(tp, MII_STAT1000, &reg))
1331 val |= (reg & 0xffff);
1333 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);
1335 if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1339 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);
1341 tg3_generate_fw_event(tp);
/* Log link up/down, speed/duplex, flow-control and (when EEE-capable)
 * EEE state to the kernel log, then forward the state to ASF firmware
 * via tg3_ump_link_report(). Some ternary result strings fall on lines
 * dropped by this extraction. */
1344 static void tg3_link_report(struct tg3 *tp)
1346 if (!netif_carrier_ok(tp->dev)) {
1347 netif_info(tp, link, tp->dev, "Link is down\n");
1348 tg3_ump_link_report(tp);
1349 } else if (netif_msg_link(tp)) {
1350 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1351 (tp->link_config.active_speed == SPEED_1000 ?
1353 (tp->link_config.active_speed == SPEED_100 ?
1355 (tp->link_config.active_duplex == DUPLEX_FULL ?
1358 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1359 (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1361 (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1364 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1365 netdev_info(tp->dev, "EEE is %s\n",
1366 tp->setlpicnt ? "enabled" : "disabled");
1368 tg3_ump_link_report(tp);
/* Map FLOW_CTRL_TX/RX bits to the copper (1000BASE-T) MII advertisement
 * bits ADVERTISE_PAUSE_CAP / ADVERTISE_PAUSE_ASYM. The declaration of
 * miireg and the return fall on lines dropped by this extraction. */
1372 static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
1376 if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1377 miireg = ADVERTISE_PAUSE_CAP;
1378 else if (flow_ctrl & FLOW_CTRL_TX)
1379 miireg = ADVERTISE_PAUSE_ASYM;
1380 else if (flow_ctrl & FLOW_CTRL_RX)
1381 miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
/* Same mapping as tg3_advert_flowctrl_1000T() but for the 1000BASE-X
 * (fiber/serdes) pause advertisement bits. */
1388 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1392 if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1393 miireg = ADVERTISE_1000XPAUSE;
1394 else if (flow_ctrl & FLOW_CTRL_TX)
1395 miireg = ADVERTISE_1000XPSE_ASYM;
1396 else if (flow_ctrl & FLOW_CTRL_RX)
1397 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
/* Resolve negotiated 1000BASE-X pause capability from local and link-
 * partner advertisements into FLOW_CTRL_TX/RX bits (standard pause
 * resolution table). Several assignment lines (e.g. the asymmetric
 * cases) fall on lines dropped by this extraction. */
1404 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1408 if (lcladv & ADVERTISE_1000XPAUSE) {
1409 if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1410 if (rmtadv & LPA_1000XPAUSE)
1411 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1412 else if (rmtadv & LPA_1000XPAUSE_ASYM)
1415 if (rmtadv & LPA_1000XPAUSE)
1416 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1418 } else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1419 if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
/* Compute the active flow-control setting (autoneg-resolved via the
 * serdes or copper resolver, or forced from link_config) and program
 * the MAC RX/TX mode flow-control enables, writing the registers only
 * when the mode actually changed. */
1426 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1430 u32 old_rx_mode = tp->rx_mode;
1431 u32 old_tx_mode = tp->tx_mode;
1433 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
1434 autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
1436 autoneg = tp->link_config.autoneg;
1438 if (autoneg == AUTONEG_ENABLE &&
1439 (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)) {
1440 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1441 flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1443 flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1445 flowctrl = tp->link_config.flowctrl;
1447 tp->link_config.active_flowctrl = flowctrl;
1449 if (flowctrl & FLOW_CTRL_RX)
1450 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1452 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1454 if (old_rx_mode != tp->rx_mode)
1455 tw32_f(MAC_RX_MODE, tp->rx_mode);
1457 if (flowctrl & FLOW_CTRL_TX)
1458 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1460 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1462 if (old_tx_mode != tp->tx_mode)
1463 tw32_f(MAC_TX_MODE, tp->tx_mode);
/* phylib link-change callback: under tp->lock, derive the MAC mode
 * (port mode, duplex) from the PHY state, resolve flow control, adjust
 * MI status and TX length/IPG registers for the negotiated speed, and
 * report via tg3_link_report() if anything user-visible changed. */
1466 static void tg3_adjust_link(struct net_device *dev)
1468 u8 oldflowctrl, linkmesg = 0;
1469 u32 mac_mode, lcl_adv, rmt_adv;
1470 struct tg3 *tp = netdev_priv(dev);
1471 struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1473 spin_lock_bh(&tp->lock);
1475 mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1476 MAC_MODE_HALF_DUPLEX);
1478 oldflowctrl = tp->link_config.active_flowctrl;
/* Select MII vs GMII port mode from the PHY-reported speed; 5785 is
 * special-cased (sub-gigabit uses MII). */
1484 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
1485 mac_mode |= MAC_MODE_PORT_MODE_MII;
1486 else if (phydev->speed == SPEED_1000 ||
1487 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
1488 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1490 mac_mode |= MAC_MODE_PORT_MODE_MII;
1492 if (phydev->duplex == DUPLEX_HALF)
1493 mac_mode |= MAC_MODE_HALF_DUPLEX;
1495 lcl_adv = tg3_advert_flowctrl_1000T(
1496 tp->link_config.flowctrl);
1499 rmt_adv = LPA_PAUSE_CAP;
1500 if (phydev->asym_pause)
1501 rmt_adv |= LPA_PAUSE_ASYM;
1504 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
1506 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1508 if (mac_mode != tp->mac_mode) {
1509 tp->mac_mode = mac_mode;
1510 tw32_f(MAC_MODE, tp->mac_mode);
1514 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
1515 if (phydev->speed == SPEED_10)
1517 MAC_MI_STAT_10MBPS_MODE |
1518 MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1520 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
/* 1000/half needs a larger slot time (0xff) than the default (32). */
1523 if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
1524 tw32(MAC_TX_LENGTHS,
1525 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1526 (6 << TX_LENGTHS_IPG_SHIFT) |
1527 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
1529 tw32(MAC_TX_LENGTHS,
1530 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1531 (6 << TX_LENGTHS_IPG_SHIFT) |
1532 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
1534 if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
1535 (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
1536 phydev->speed != tp->link_config.active_speed ||
1537 phydev->duplex != tp->link_config.active_duplex ||
1538 oldflowctrl != tp->link_config.active_flowctrl)
1541 tp->link_config.active_speed = phydev->speed;
1542 tp->link_config.active_duplex = phydev->duplex;
1544 spin_unlock_bh(&tp->lock);
1547 tg3_link_report(tp);
/* Connect the MAC to its PHY via phylib (phy_connect with
 * tg3_adjust_link as the callback), mask the PHY's supported features
 * down to what the MAC/interface mode can do, and mark the PHY as
 * connected. Returns an error if the attach fails. */
1550 static int tg3_phy_init(struct tg3 *tp)
1552 struct phy_device *phydev;
1554 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
1557 /* Bring the PHY back to a known state. */
1560 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1562 /* Attach the MAC to the PHY. */
1563 phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
1564 phydev->dev_flags, phydev->interface);
1565 if (IS_ERR(phydev)) {
1566 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
1567 return PTR_ERR(phydev);
1570 /* Mask with MAC supported features. */
1571 switch (phydev->interface) {
1572 case PHY_INTERFACE_MODE_GMII:
1573 case PHY_INTERFACE_MODE_RGMII:
1574 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
1575 phydev->supported &= (PHY_GBIT_FEATURES |
1577 SUPPORTED_Asym_Pause);
1581 case PHY_INTERFACE_MODE_MII:
1582 phydev->supported &= (PHY_BASIC_FEATURES |
1584 SUPPORTED_Asym_Pause);
/* Default case (unsupported interface): disconnect and bail out. */
1587 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1591 tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
1593 phydev->advertising = phydev->supported;
/* (Re)start the PHY: if resuming from low power, restore the saved
 * speed/duplex/autoneg/advertising configuration, then kick off
 * autonegotiation. No-op when the PHY is not connected. */
1598 static void tg3_phy_start(struct tg3 *tp)
1600 struct phy_device *phydev;
1602 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1605 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1607 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
1608 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
1609 phydev->speed = tp->link_config.orig_speed;
1610 phydev->duplex = tp->link_config.orig_duplex;
1611 phydev->autoneg = tp->link_config.orig_autoneg;
1612 phydev->advertising = tp->link_config.orig_advertising;
1617 phy_start_aneg(phydev);
/* Stop the phylib state machine for the attached PHY, if connected. */
1620 static void tg3_phy_stop(struct tg3 *tp)
1622 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1625 phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
/* Disconnect from the PHY and clear the connected flag (inverse of
 * tg3_phy_init()). */
1628 static void tg3_phy_fini(struct tg3 *tp)
1630 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
1631 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1632 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
/* Toggle auto power-down (APD) on FET-style PHYs via the shadow
 * register window: enable shadow access through MII_TG3_FET_TEST,
 * flip the APD bit in SHDW_AUXSTAT2, then restore the test register. */
1636 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
1640 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
1643 tg3_writephy(tp, MII_TG3_FET_TEST,
1644 phytest | MII_TG3_FET_SHADOW_EN);
1645 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
1647 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
1649 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
1650 tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
1652 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
/* Enable/disable auto power-down on the PHY. FET PHYs delegate to
 * tg3_phy_fet_toggle_apd(); others program the SCR5 and APD shadow
 * registers via MII_TG3_MISC_SHDW. Skipped on pre-5705 parts and on
 * 5717+ serdes configurations. */
1656 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
1660 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
1661 ((tp->tg3_flags3 & TG3_FLG3_5717_PLUS) &&
1662 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
1665 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
1666 tg3_phy_fet_toggle_apd(tp, enable);
1670 reg = MII_TG3_MISC_SHDW_WREN |
1671 MII_TG3_MISC_SHDW_SCR5_SEL |
1672 MII_TG3_MISC_SHDW_SCR5_LPED |
1673 MII_TG3_MISC_SHDW_SCR5_DLPTLM |
1674 MII_TG3_MISC_SHDW_SCR5_SDTL |
1675 MII_TG3_MISC_SHDW_SCR5_C125OE;
1676 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
1677 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
1679 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
1682 reg = MII_TG3_MISC_SHDW_WREN |
1683 MII_TG3_MISC_SHDW_APD_SEL |
1684 MII_TG3_MISC_SHDW_APD_WKTM_84MS;
/* APD enable bit added only in the enable case (condition line is
 * among the lines dropped by this extraction). */
1686 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
1688 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
/* Enable/disable automatic MDI crossover. FET PHYs use the shadow
 * MISCCTRL register; other PHYs use the AUXCTL "misc" shadow register
 * via tg3_phy_auxctl_read/write. Skipped on pre-5705 and serdes. */
1691 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
1695 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
1696 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
1699 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
1702 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
1703 u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
1705 tg3_writephy(tp, MII_TG3_FET_TEST,
1706 ephy | MII_TG3_FET_SHADOW_EN);
1707 if (!tg3_readphy(tp, reg, &phy)) {
1709 phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
1711 phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
1712 tg3_writephy(tp, reg, phy);
1714 tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
1719 ret = tg3_phy_auxctl_read(tp,
1720 MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
1723 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
1725 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
1726 tg3_phy_auxctl_write(tp,
1727 MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
/* Enable ethernet@wirespeed in the AUXCTL misc shadow register unless
 * the PHY is flagged as not supporting it. */
1732 static void tg3_phy_set_wirespeed(struct tg3 *tp)
1737 if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
1740 ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
1742 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
1743 val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
/* Program PHY DSP tuning values derived from the chip's OTP word into
 * the TAP1/AADJ1CH0/AADJ1CH3/EXP75/EXP96/EXP97 DSP registers, bracketed
 * by SMDSP enable/disable. NOTE(review): the lines loading "otp" (and
 * the early-out when no OTP value is present) were dropped by this
 * extraction. */
1746 static void tg3_phy_apply_otp(struct tg3 *tp)
1755 if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
1758 phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
1759 phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
1760 tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
1762 phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
1763 ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
1764 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
1766 phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
1767 phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
1768 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
1770 phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
1771 tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
1773 phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
1774 tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
1776 phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
1777 ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
1778 tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
1780 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
/* Adjust Energy-Efficient-Ethernet state after a link change: when the
 * link is up at 100/1000 full duplex with autoneg, pick the CPMU EEE
 * exit timer for the speed, read the clause-45 EEE resolution status,
 * and apply per-ASIC DSP fixups; otherwise disable LPI in CPMU EEE
 * mode. Skipped when the PHY is not EEE-capable. */
1783 static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
1787 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
1792 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
1793 current_link_up == 1 &&
1794 tp->link_config.active_duplex == DUPLEX_FULL &&
1795 (tp->link_config.active_speed == SPEED_100 ||
1796 tp->link_config.active_speed == SPEED_1000)) {
1799 if (tp->link_config.active_speed == SPEED_1000)
1800 eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
1802 eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
1804 tw32(TG3_CPMU_EEE_CTRL, eeectl);
1806 tg3_phy_cl45_read(tp, MDIO_MMD_AN,
1807 TG3_CL45_D7_EEERES_STAT, &val);
1810 case TG3_CL45_D7_EEERES_STAT_LP_1000T:
1811 switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
1814 case ASIC_REV_57765:
1815 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
1816 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26,
1818 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
1822 case TG3_CL45_D7_EEERES_STAT_LP_100TX:
1827 if (!tp->setlpicnt) {
1828 val = tr32(TG3_CPMU_EEE_MODE);
1829 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
/* Poll the DSP control register until its busy bit (0x1000) clears;
 * the surrounding retry loop and timeout return are on lines dropped
 * by this extraction. */
1833 static int tg3_wait_macro_done(struct tg3 *tp)
1840 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
1841 if ((tmp32 & 0x1000) == 0)
/* Write a fixed test pattern into each of the PHY DSP's 4 channels,
 * read it back, and verify it. On mismatch the channel is poked with
 * recovery writes and *resetp is (per the visible flow) used to request
 * a PHY reset by the caller. */
1851 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
1853 static const u32 test_pat[4][6] = {
1854 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
1855 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
1856 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
1857 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
1861 for (chan = 0; chan < 4; chan++) {
/* Select the channel's DSP address block and write the 6 pattern words. */
1864 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1865 (chan * 0x2000) | 0x0200);
1866 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
1868 for (i = 0; i < 6; i++)
1869 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
1872 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
1873 if (tg3_wait_macro_done(tp)) {
/* Switch to read-back mode for the same channel. */
1878 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1879 (chan * 0x2000) | 0x0200);
1880 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
1881 if (tg3_wait_macro_done(tp)) {
1886 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
1887 if (tg3_wait_macro_done(tp)) {
1892 for (i = 0; i < 6; i += 2) {
1895 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
1896 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
1897 tg3_wait_macro_done(tp)) {
1903 if (low != test_pat[chan][i] ||
1904 high != test_pat[chan][i+1]) {
1905 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
1906 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
1907 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
/* Clear the test pattern from all 4 DSP channels by writing zeros and
 * waiting for the DSP macro to finish each channel. */
1917 static int tg3_phy_reset_chanpat(struct tg3 *tp)
1921 for (chan = 0; chan < 4; chan++) {
1924 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1925 (chan * 0x2000) | 0x0200);
1926 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
1927 for (i = 0; i < 6; i++)
1928 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
1929 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
1930 if (tg3_wait_macro_done(tp))
/* PHY reset workaround for 5703/5704/5705: force 1000/full master mode,
 * repeatedly write/verify a DSP test pattern (resetting the PHY between
 * attempts), then clear the pattern and restore MII_TG3_CTRL and the
 * transmitter/interrupt state.
 * FIX(review): restored "&reg32" in the two MII_TG3_EXT_CTRL reads --
 * the extraction had mojibake "®32" (HTML entity "&reg" fused with
 * "32"); reg32 is declared above and written back right after each
 * read.
 * NOTE(review): blank/brace lines were dropped by the extraction; all
 * other code lines are byte-identical. */
1937 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
1939 u32 reg32, phy9_orig;
1940 int retries, do_phy_reset, err;
1946 err = tg3_bmcr_reset(tp);
1952 /* Disable transmitter and interrupt. */
1953 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
1957 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
1959 /* Set full-duplex, 1000 mbps. */
1960 tg3_writephy(tp, MII_BMCR,
1961 BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
1963 /* Set to master mode. */
1964 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
1967 tg3_writephy(tp, MII_TG3_CTRL,
1968 (MII_TG3_CTRL_AS_MASTER |
1969 MII_TG3_CTRL_ENABLE_AS_MASTER));
1971 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
1975 /* Block the PHY control access. */
1976 tg3_phydsp_write(tp, 0x8005, 0x0800);
1978 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
1981 } while (--retries);
1983 err = tg3_phy_reset_chanpat(tp);
1987 tg3_phydsp_write(tp, 0x8005, 0x0000);
1989 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
1990 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
1992 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
1994 tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
1996 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
1998 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2005 /* This will reset the tigon3 PHY if there is no valid
2006 * link unless the FORCE argument is non-zero.
/* Full PHY reset sequence: take 5906 out of IDDQ, verify BMSR reads,
 * drop carrier, run the 5703/4/5 workaround or a plain BMCR reset
 * (guarding the 5784 CPMU 10MB-RX-only erratum), fix the 1000MB MAC
 * clock on 5784/5761 AX, then reapply OTP/APD/auto-MDIX/wirespeed and
 * the various per-PHY bug workarounds (ADC, 5704 A0, BER, jitter) and
 * jumbo-frame settings. */
2008 static int tg3_phy_reset(struct tg3 *tp)
2013 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2014 val = tr32(GRC_MISC_CFG);
2015 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2018 err = tg3_readphy(tp, MII_BMSR, &val);
2019 err |= tg3_readphy(tp, MII_BMSR, &val);
2023 if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
2024 netif_carrier_off(tp->dev);
2025 tg3_link_report(tp);
2028 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2029 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2030 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
2031 err = tg3_phy_reset_5703_4_5(tp);
/* Temporarily clear the 5784 CPMU 10MB-RX-only bit around the reset. */
2038 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
2039 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
2040 cpmuctrl = tr32(TG3_CPMU_CTRL);
2041 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2043 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2046 err = tg3_bmcr_reset(tp);
2050 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2051 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2052 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2054 tw32(TG3_CPMU_CTRL, cpmuctrl);
2057 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2058 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2059 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2060 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2061 CPMU_LSPD_1000MB_MACCLK_12_5) {
2062 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2064 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2068 if ((tp->tg3_flags3 & TG3_FLG3_5717_PLUS) &&
2069 (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2072 tg3_phy_apply_otp(tp);
2074 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2075 tg3_phy_toggle_apd(tp, true);
2077 tg3_phy_toggle_apd(tp, false);
2080 if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2081 !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2082 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2083 tg3_phydsp_write(tp, 0x000a, 0x0323);
2084 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2087 if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2088 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2089 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2092 if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2093 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2094 tg3_phydsp_write(tp, 0x000a, 0x310b);
2095 tg3_phydsp_write(tp, 0x201f, 0x9506);
2096 tg3_phydsp_write(tp, 0x401f, 0x14e2);
2097 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2099 } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2100 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2101 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2102 if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2103 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2104 tg3_writephy(tp, MII_TG3_TEST1,
2105 MII_TG3_TEST1_TRIM_EN | 0x4);
2107 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2109 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2113 /* Set Extended packet length bit (bit 14) on all chips that */
2114 /* support jumbo frames */
2115 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2116 /* Cannot do read-modify-write on 5401 */
2117 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2118 } else if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
2119 /* Set bit 14 with read-modify-write to preserve other bits */
2120 err = tg3_phy_auxctl_read(tp,
2121 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2123 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2124 val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2127 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2128 * jumbo frames transmission.
2130 if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
2131 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2132 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2133 val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2136 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2137 /* adjust output voltage */
2138 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2141 tg3_phy_toggle_automdix(tp, 1);
2142 tg3_phy_set_wirespeed(tp);
/* Manage the auxiliary (Vaux) power GPIOs: decide whether Vaux is
 * needed (this function and, for dual-port parts, the peer function's
 * WOL/ASF state), then drive the chip-specific GRC_LOCAL_CTRL GPIO
 * sequence to switch Vaux on or off. Not applicable to 5719/57765 or
 * non-NIC configurations. */
2146 static void tg3_frob_aux_power(struct tg3 *tp)
2148 bool need_vaux = false;
2150 /* The GPIOs do something completely different on 57765. */
2151 if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0 ||
2152 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2153 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
/* Dual-port parts: consult the peer PCI function's driver state. */
2156 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2157 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
2158 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2159 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) &&
2160 tp->pdev_peer != tp->pdev) {
2161 struct net_device *dev_peer;
2163 dev_peer = pci_get_drvdata(tp->pdev_peer);
2165 /* remove_one() may have been run on the peer. */
2167 struct tg3 *tp_peer = netdev_priv(dev_peer);
2169 if (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE)
2172 if ((tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) ||
2173 (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF))
2178 if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) ||
2179 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
/* Vaux-on GPIO sequences, selected by chip family. */
2183 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2184 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2185 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2186 (GRC_LCLCTRL_GPIO_OE0 |
2187 GRC_LCLCTRL_GPIO_OE1 |
2188 GRC_LCLCTRL_GPIO_OE2 |
2189 GRC_LCLCTRL_GPIO_OUTPUT0 |
2190 GRC_LCLCTRL_GPIO_OUTPUT1),
2192 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2193 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2194 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2195 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2196 GRC_LCLCTRL_GPIO_OE1 |
2197 GRC_LCLCTRL_GPIO_OE2 |
2198 GRC_LCLCTRL_GPIO_OUTPUT0 |
2199 GRC_LCLCTRL_GPIO_OUTPUT1 |
2201 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
2203 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2204 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
2206 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2207 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
2210 u32 grc_local_ctrl = 0;
2212 /* Workaround to prevent overdrawing Amps. */
2213 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2215 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2216 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2217 grc_local_ctrl, 100);
2220 /* On 5753 and variants, GPIO2 cannot be used. */
2221 no_gpio2 = tp->nic_sram_data_cfg &
2222 NIC_SRAM_DATA_CFG_NO_GPIO2;
2224 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2225 GRC_LCLCTRL_GPIO_OE1 |
2226 GRC_LCLCTRL_GPIO_OE2 |
2227 GRC_LCLCTRL_GPIO_OUTPUT1 |
2228 GRC_LCLCTRL_GPIO_OUTPUT2;
2230 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2231 GRC_LCLCTRL_GPIO_OUTPUT2);
2233 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2234 grc_local_ctrl, 100);
2236 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2238 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2239 grc_local_ctrl, 100);
2242 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2243 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2244 grc_local_ctrl, 100);
/* Vaux-off path (no WOL/ASF need): drive GPIO1 to disable Vaux. */
2248 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
2249 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
2250 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2251 (GRC_LCLCTRL_GPIO_OE1 |
2252 GRC_LCLCTRL_GPIO_OUTPUT1), 100);
2254 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2255 GRC_LCLCTRL_GPIO_OE1, 100);
2257 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2258 (GRC_LCLCTRL_GPIO_OE1 |
2259 GRC_LCLCTRL_GPIO_OUTPUT1), 100);
/* Decide the 5700 link LED polarity from the LED mode, PHY model and
 * speed; the return-value lines fall on lines dropped by this
 * extraction. */
2264 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2266 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2268 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2269 if (speed != SPEED_10)
2271 } else if (speed == SPEED_10)
2277 static int tg3_setup_phy(struct tg3 *, int);
2279 #define RESET_KIND_SHUTDOWN 0
2280 #define RESET_KIND_INIT 1
2281 #define RESET_KIND_SUSPEND 2
2283 static void tg3_write_sig_post_reset(struct tg3 *, int);
2284 static int tg3_halt_cpu(struct tg3 *, u32);
/* Power the PHY down for suspend/low-power. Serdes PHYs get the SG_DIG
 * soft-reset path (5704 special case); 5906 enters EPHY IDDQ; FET PHYs
 * use shadow AUXMODE4; others get LED-off + low-power AUXCTL settings
 * when do_low_power is set. Chips whose PHY must stay up return before
 * the final BMCR_PDOWN write; 5784/5761 AX first restore the 12.5MHz
 * 1000MB MAC clock. */
2286 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2290 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2291 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2292 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2293 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2296 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2297 tw32(SG_DIG_CTRL, sg_dig_ctrl);
2298 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2303 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2305 val = tr32(GRC_MISC_CFG);
2306 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2309 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2311 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2314 tg3_writephy(tp, MII_ADVERTISE, 0);
2315 tg3_writephy(tp, MII_BMCR,
2316 BMCR_ANENABLE | BMCR_ANRESTART);
2318 tg3_writephy(tp, MII_TG3_FET_TEST,
2319 phytest | MII_TG3_FET_SHADOW_EN);
2320 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2321 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2323 MII_TG3_FET_SHDW_AUXMODE4,
2326 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2329 } else if (do_low_power) {
2330 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2331 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2333 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2334 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2335 MII_TG3_AUXCTL_PCTL_VREG_11V;
2336 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
2339 /* The PHY should not be powered down on some chips because
2342 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2343 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2344 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2345 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2348 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2349 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2350 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2351 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2352 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2353 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2356 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2359 /* tp->lock is held. */
/* Acquire the NVRAM software arbitration semaphore (REQ_SET1) when the
 * lock count is zero, polling up to 8000 times for the grant; on
 * timeout the request is withdrawn. Reference-counted via
 * nvram_lock_cnt. */
2360 static int tg3_nvram_lock(struct tg3 *tp)
2362 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
2365 if (tp->nvram_lock_cnt == 0) {
2366 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2367 for (i = 0; i < 8000; i++) {
2368 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2373 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2377 tp->nvram_lock_cnt++;
2382 /* tp->lock is held. */
/* Release one reference on the NVRAM arbitration lock; clear the
 * hardware semaphore only when the count reaches zero. */
2383 static void tg3_nvram_unlock(struct tg3 *tp)
2385 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
2386 if (tp->nvram_lock_cnt > 0)
2387 tp->nvram_lock_cnt--;
2388 if (tp->nvram_lock_cnt == 0)
2389 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2393 /* tp->lock is held. */
/* Set the NVRAM access-enable bit on 5750+ parts without protected
 * NVRAM. */
2394 static void tg3_enable_nvram_access(struct tg3 *tp)
2396 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
2397 !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM)) {
2398 u32 nvaccess = tr32(NVRAM_ACCESS);
2400 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2404 /* tp->lock is held. */
/* Clear the NVRAM access-enable bit (inverse of
 * tg3_enable_nvram_access()). */
2405 static void tg3_disable_nvram_access(struct tg3 *tp)
2407 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
2408 !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM)) {
2409 u32 nvaccess = tr32(NVRAM_ACCESS);
2411 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
/* Read a 32-bit word from the legacy serial EEPROM through the
 * GRC_EEPROM_ADDR/DATA registers: program address + READ + START, poll
 * for COMPLETE (up to 1000 iterations), then fetch the data. Offset
 * must be word-aligned and within the address mask. */
2415 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
2416 u32 offset, u32 *val)
2421 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
2424 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
2425 EEPROM_ADDR_DEVID_MASK |
2427 tw32(GRC_EEPROM_ADDR,
2429 (0 << EEPROM_ADDR_DEVID_SHIFT) |
2430 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
2431 EEPROM_ADDR_ADDR_MASK) |
2432 EEPROM_ADDR_READ | EEPROM_ADDR_START);
2434 for (i = 0; i < 1000; i++) {
2435 tmp = tr32(GRC_EEPROM_ADDR);
2437 if (tmp & EEPROM_ADDR_COMPLETE)
2441 if (!(tmp & EEPROM_ADDR_COMPLETE))
2444 tmp = tr32(GRC_EEPROM_DATA);
2447 * The data will always be opposite the native endian
2448 * format. Perform a blind byteswap to compensate.
2455 #define NVRAM_CMD_TIMEOUT 10000
/* Issue an NVRAM command and poll up to NVRAM_CMD_TIMEOUT iterations
 * for NVRAM_CMD_DONE; timing out is the failure case. */
2457 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
2461 tw32(NVRAM_CMD, nvram_cmd);
2462 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
2464 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
2470 if (i == NVRAM_CMD_TIMEOUT)
/* Translate a logical NVRAM offset to the physical address layout used
 * by buffered Atmel AT45DB0x1B flash (page number shifted into the
 * page-position field plus the in-page offset); other NVRAM types need
 * no translation. */
2476 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
2478 if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
2479 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
2480 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
2481 !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
2482 (tp->nvram_jedecnum == JEDEC_ATMEL))
2484 addr = ((addr / tp->nvram_pagesize) <<
2485 ATMEL_AT45DB0X1B_PAGE_POS) +
2486 (addr % tp->nvram_pagesize);
/* Inverse of tg3_nvram_phys_addr(): convert an Atmel page/offset
 * physical address back to a flat logical offset. */
2491 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
2493 if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
2494 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
2495 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
2496 !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
2497 (tp->nvram_jedecnum == JEDEC_ATMEL))
2499 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
2500 tp->nvram_pagesize) +
2501 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
2506 /* NOTE: Data read in from NVRAM is byteswapped according to
2507 * the byteswapping settings for all other register accesses.
2508 * tg3 devices are BE devices, so on a BE machine, the data
2509 * returned will be exactly as it is seen in NVRAM. On a LE
2510 * machine, the 32-bit value will be byteswapped.
/* Read one 32-bit word from NVRAM: falls back to the EEPROM path when
 * there is no NVRAM interface; otherwise translates the offset, takes
 * the arbitration lock, enables access, executes a read command, and
 * undoes everything on the way out. */
2512 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
2516 if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
2517 return tg3_nvram_read_using_eeprom(tp, offset, val);
2519 offset = tg3_nvram_phys_addr(tp, offset);
2521 if (offset > NVRAM_ADDR_MSK)
2524 ret = tg3_nvram_lock(tp);
2528 tg3_enable_nvram_access(tp);
2530 tw32(NVRAM_ADDR, offset);
2531 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
2532 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
2535 *val = tr32(NVRAM_RDDATA);
2537 tg3_disable_nvram_access(tp);
2539 tg3_nvram_unlock(tp);
2544 /* Ensures NVRAM data is in bytestream format. */
/* Wrapper around tg3_nvram_read() that returns the word as a big-endian
 * (bytestream-order) __be32 regardless of host endianness.  Propagates the
 * underlying read's return code (the return line is missing from this view).
 */
2545 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
2548 int res = tg3_nvram_read(tp, offset, &v);
2550 *val = cpu_to_be32(v);
2554 /* tp->lock is held. */
/* Program the device's unicast MAC address from tp->dev->dev_addr.
 *
 * The 6-byte address is split into a 16-bit high half (bytes 0-1) and a
 * 32-bit low half (bytes 2-5) and written into all four MAC_ADDR_x
 * register pairs; entry 1 can be skipped via `skip_mac_1` (presumably to
 * preserve a firmware/ASF-owned entry -- TODO confirm against callers).
 * On 5703/5704 the twelve extended address slots are filled with the same
 * value, and finally a TX backoff seed is derived from the byte sum of
 * the address.  Caller must hold tp->lock (per the comment above).
 */
2555 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
2557 u32 addr_high, addr_low;
2560 addr_high = ((tp->dev->dev_addr[0] << 8) |
2561 tp->dev->dev_addr[1]);
2562 addr_low = ((tp->dev->dev_addr[2] << 24) |
2563 (tp->dev->dev_addr[3] << 16) |
2564 (tp->dev->dev_addr[4] << 8) |
2565 (tp->dev->dev_addr[5] << 0));
2566 for (i = 0; i < 4; i++) {
2567 if (i == 1 && skip_mac_1)
2569 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
2570 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
/* 5703/5704 also carry 12 extended MAC address slots. */
2573 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2574 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2575 for (i = 0; i < 12; i++) {
2576 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
2577 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
/* Seed the transmit backoff generator from the MAC address bytes so
 * different NICs pick different backoff slots.
 */
2581 addr_high = (tp->dev->dev_addr[0] +
2582 tp->dev->dev_addr[1] +
2583 tp->dev->dev_addr[2] +
2584 tp->dev->dev_addr[3] +
2585 tp->dev->dev_addr[4] +
2586 tp->dev->dev_addr[5]) &
2587 TX_BACKOFF_SEED_MASK;
2588 tw32(MAC_TX_BACKOFF_SEED, addr_high);
/* Re-arm register access after a power transition by rewriting the cached
 * MISC_HOST_CTRL value into PCI config space (works for both direct and
 * indirect register access modes).
 */
2591 static void tg3_enable_register_access(struct tg3 *tp)
2594 * Make sure register accesses (indirect or otherwise) will function
2597 pci_write_config_dword(tp->pdev,
2598 TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
/* Bring the device back to full power (PCI D0) and, for NIC form factors,
 * switch the power source from auxiliary (Vaux) back to main power via
 * GRC_LOCAL_CTRL.  Counterpart of tg3_power_down().
 */
2601 static int tg3_power_up(struct tg3 *tp)
2603 tg3_enable_register_access(tp);
2605 pci_set_power_state(tp->pdev, PCI_D0);
2607 /* Switch out of Vaux if it is a NIC */
2608 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
2609 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
/* Prepare the chip for a low-power state (suspend / WOL shutdown):
 *
 *  1. restore PCIe CLKREQ for chips with the CLKREQ erratum;
 *  2. mask PCI interrupts via MISC_HOST_CTRL;
 *  3. drop the PHY link to its lowest usable speed, saving the original
 *     link settings in tp->link_config.orig_* for restore at resume time
 *     (via phylib when TG3_FLG3_USE_PHYLIB, otherwise via tg3_setup_phy);
 *  4. hand shutdown state to the on-chip firmware (VCPU on 5906, ASF
 *     mailbox handshake otherwise) and write the WOL signature;
 *  5. if wakeup is wanted, configure MAC mode / RX mode so Magic Packets
 *     can be received while suspended;
 *  6. gate the various core clocks as appropriate for the chip family;
 *  7. power down the PHY when nothing needs it, flip aux power, apply the
 *     5750 AX/BX PLL workaround, and post the shutdown signature.
 *
 * NOTE(review): this extraction is missing many interior lines (local
 * variable declarations, udelay()s, else-branches, closing braces and the
 * final return), so the annotations below describe only the visible lines.
 */
2614 static int tg3_power_down_prepare(struct tg3 *tp)
2617 bool device_should_wake, do_low_power;
2619 tg3_enable_register_access(tp);
2621 /* Restore the CLKREQ setting. */
2622 if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG) {
2625 pci_read_config_word(tp->pdev,
2626 tp->pcie_cap + PCI_EXP_LNKCTL,
2628 lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
2629 pci_write_config_word(tp->pdev,
2630 tp->pcie_cap + PCI_EXP_LNKCTL,
/* Mask PCI interrupts while the device is being quiesced. */
2634 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
2635 tw32(TG3PCI_MISC_HOST_CTRL,
2636 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
2638 device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
2639 (tp->tg3_flags & TG3_FLAG_WOL_ENABLE);
/* phylib-managed PHY: save link state, then restrict advertisement to
 * the minimum needed (10HD always; 100 and 10FD only for WOL/ASF).
 */
2641 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
2642 do_low_power = false;
2643 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
2644 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
2645 struct phy_device *phydev;
2646 u32 phyid, advertising;
2648 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2650 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
2652 tp->link_config.orig_speed = phydev->speed;
2653 tp->link_config.orig_duplex = phydev->duplex;
2654 tp->link_config.orig_autoneg = phydev->autoneg;
2655 tp->link_config.orig_advertising = phydev->advertising;
2657 advertising = ADVERTISED_TP |
2659 ADVERTISED_Autoneg |
2660 ADVERTISED_10baseT_Half;
2662 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
2663 device_should_wake) {
2664 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
2666 ADVERTISED_100baseT_Half |
2667 ADVERTISED_100baseT_Full |
2668 ADVERTISED_10baseT_Full;
2670 advertising |= ADVERTISED_10baseT_Full;
2673 phydev->advertising = advertising;
2675 phy_start_aneg(phydev);
/* Broadcom OUI PHYs (other than AC131) need the explicit
 * low-power sequence later in this function.
 */
2677 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
2678 if (phyid != PHY_ID_BCMAC131) {
2679 phyid &= PHY_BCM_OUI_MASK;
2680 if (phyid == PHY_BCM_OUI_1 ||
2681 phyid == PHY_BCM_OUI_2 ||
2682 phyid == PHY_BCM_OUI_3)
2683 do_low_power = true;
/* Non-phylib path: save current link config and force 10/half. */
2687 do_low_power = true;
2689 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
2690 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
2691 tp->link_config.orig_speed = tp->link_config.speed;
2692 tp->link_config.orig_duplex = tp->link_config.duplex;
2693 tp->link_config.orig_autoneg = tp->link_config.autoneg;
2696 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
2697 tp->link_config.speed = SPEED_10;
2698 tp->link_config.duplex = DUPLEX_HALF;
2699 tp->link_config.autoneg = AUTONEG_ENABLE;
2700 tg3_setup_phy(tp, 0);
/* Tell on-chip firmware the driver is shutting down. */
2704 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2707 val = tr32(GRC_VCPU_EXT_CTRL);
2708 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
2709 } else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
2713 for (i = 0; i < 200; i++) {
2714 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
2715 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
2720 if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
2721 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
2722 WOL_DRV_STATE_SHUTDOWN |
/* Configure the MAC so Wake-on-LAN frames can be received. */
2726 if (device_should_wake) {
2729 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
2731 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
2732 tg3_phy_auxctl_write(tp,
2733 MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
2734 MII_TG3_AUXCTL_PCTL_WOL_EN |
2735 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2736 MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
2740 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
2741 mac_mode = MAC_MODE_PORT_MODE_GMII;
2743 mac_mode = MAC_MODE_PORT_MODE_MII;
2745 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
2746 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2748 u32 speed = (tp->tg3_flags &
2749 TG3_FLAG_WOL_SPEED_100MB) ?
2750 SPEED_100 : SPEED_10;
2751 if (tg3_5700_link_polarity(tp, speed))
2752 mac_mode |= MAC_MODE_LINK_POLARITY;
2754 mac_mode &= ~MAC_MODE_LINK_POLARITY;
2757 mac_mode = MAC_MODE_PORT_MODE_TBI;
2760 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
2761 tw32(MAC_LED_CTRL, tp->led_ctrl);
2763 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
2764 if (((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
2765 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) &&
2766 ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
2767 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)))
2768 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
2770 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
2771 mac_mode |= MAC_MODE_APE_TX_EN |
2772 MAC_MODE_APE_RX_EN |
2773 MAC_MODE_TDE_ENABLE;
2775 tw32_f(MAC_MODE, mac_mode);
2778 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
/* Clock gating: per-family selection of which core clocks can be
 * stopped while suspended.
 */
2782 if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
2783 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2784 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
2787 base_val = tp->pci_clock_ctrl;
2788 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
2789 CLOCK_CTRL_TXCLK_DISABLE);
2791 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
2792 CLOCK_CTRL_PWRDOWN_PLL133, 40);
2793 } else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
2794 (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
2795 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
2797 } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
2798 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
2799 u32 newbits1, newbits2;
2801 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2802 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2803 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
2804 CLOCK_CTRL_TXCLK_DISABLE |
2806 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2807 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
2808 newbits1 = CLOCK_CTRL_625_CORE;
2809 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
2811 newbits1 = CLOCK_CTRL_ALTCLK;
2812 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2815 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
2818 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
2821 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2824 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2825 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2826 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
2827 CLOCK_CTRL_TXCLK_DISABLE |
2828 CLOCK_CTRL_44MHZ_CORE);
2830 newbits3 = CLOCK_CTRL_44MHZ_CORE;
2833 tw32_wait_f(TG3PCI_CLOCK_CTRL,
2834 tp->pci_clock_ctrl | newbits3, 40);
/* Fully power down the PHY only when neither WOL nor ASF needs it. */
2838 if (!(device_should_wake) &&
2839 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
2840 tg3_power_down_phy(tp, do_low_power);
2842 tg3_frob_aux_power(tp);
2844 /* Workaround for unstable PLL clock */
2845 if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
2846 (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
2847 u32 val = tr32(0x7d00);
2849 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
2851 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
2854 err = tg3_nvram_lock(tp);
2855 tg3_halt_cpu(tp, RX_CPU_BASE);
2857 tg3_nvram_unlock(tp);
2861 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
/* Final power-down: run the prepare sequence, arm PCI wakeup from D3
 * when WOL is enabled, then drop the device into PCI D3hot.
 */
2866 static void tg3_power_down(struct tg3 *tp)
2868 tg3_power_down_prepare(tp);
2870 pci_wake_from_d3(tp->pdev, tp->tg3_flags & TG3_FLAG_WOL_ENABLE);
2871 pci_set_power_state(tp->pdev, PCI_D3hot);
/* Decode the PHY AUX status register's speed/duplex field into
 * (*speed, *duplex).  FET-style PHYs encode the result with separate
 * 100/FULL bits handled in the default case; anything unrecognized on a
 * non-FET PHY yields SPEED_INVALID/DUPLEX_INVALID.
 *
 * NOTE(review): the SPEED_10/SPEED_100 assignments and the break
 * statements between cases are among the lines missing from this
 * extraction.
 */
2874 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
2876 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
2877 case MII_TG3_AUX_STAT_10HALF:
2879 *duplex = DUPLEX_HALF;
2882 case MII_TG3_AUX_STAT_10FULL:
2884 *duplex = DUPLEX_FULL;
2887 case MII_TG3_AUX_STAT_100HALF:
2889 *duplex = DUPLEX_HALF;
2892 case MII_TG3_AUX_STAT_100FULL:
2894 *duplex = DUPLEX_FULL;
2897 case MII_TG3_AUX_STAT_1000HALF:
2898 *speed = SPEED_1000;
2899 *duplex = DUPLEX_HALF;
2902 case MII_TG3_AUX_STAT_1000FULL:
2903 *speed = SPEED_1000;
2904 *duplex = DUPLEX_FULL;
/* Fallback: FET PHYs use dedicated 100/FULL flag bits instead. */
2908 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2909 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
2911 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
2915 *speed = SPEED_INVALID;
2916 *duplex = DUPLEX_INVALID;
/* Program the copper PHY's advertisement registers and (re)start
 * negotiation according to tp->link_config:
 *
 *  - low-power mode: advertise only 10baseT (plus 100baseT when WOL at
 *    100 Mb is required), gigabit disabled;
 *  - autoneg with no forced speed: build MII_ADVERTISE / MII_TG3_CTRL
 *    from link_config.advertising, with the 5701 A0/B0 master-mode
 *    workaround;
 *  - forced speed: build the matching single-speed advertisement;
 *  - EEE-capable chips additionally program the CPMU/DSP EEE registers
 *    and the clause-45 EEE advertisement;
 *  - when autoneg is disabled, force BMCR directly (waiting for the old
 *    link to drop first), otherwise restart autonegotiation.
 *
 * NOTE(review): many interior lines (local declarations, else branches,
 * udelay()s, closing braces) are missing from this extraction; comments
 * below describe only the visible lines.
 */
2921 static void tg3_phy_copper_begin(struct tg3 *tp)
2926 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2927 /* Entering low power mode. Disable gigabit and
2928 * 100baseT advertisements.
2930 tg3_writephy(tp, MII_TG3_CTRL, 0);
2932 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
2933 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
2934 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
2935 new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
2937 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2938 } else if (tp->link_config.speed == SPEED_INVALID) {
2939 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
2940 tp->link_config.advertising &=
2941 ~(ADVERTISED_1000baseT_Half |
2942 ADVERTISED_1000baseT_Full);
2944 new_adv = ADVERTISE_CSMA;
2945 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
2946 new_adv |= ADVERTISE_10HALF;
2947 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
2948 new_adv |= ADVERTISE_10FULL;
2949 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
2950 new_adv |= ADVERTISE_100HALF;
2951 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
2952 new_adv |= ADVERTISE_100FULL;
2954 new_adv |= tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2956 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2958 if (tp->link_config.advertising &
2959 (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
2961 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
2962 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
2963 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
2964 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
/* 5701 A0/B0 must negotiate as 1000T master. */
2965 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY) &&
2966 (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2967 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
2968 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2969 MII_TG3_CTRL_ENABLE_AS_MASTER);
2970 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
2972 tg3_writephy(tp, MII_TG3_CTRL, 0);
2975 new_adv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2976 new_adv |= ADVERTISE_CSMA;
2978 /* Asking for a specific link mode. */
2979 if (tp->link_config.speed == SPEED_1000) {
2980 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2982 if (tp->link_config.duplex == DUPLEX_FULL)
2983 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
2985 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
2986 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2987 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
2988 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2989 MII_TG3_CTRL_ENABLE_AS_MASTER);
2991 if (tp->link_config.speed == SPEED_100) {
2992 if (tp->link_config.duplex == DUPLEX_FULL)
2993 new_adv |= ADVERTISE_100FULL;
2995 new_adv |= ADVERTISE_100HALF;
2997 if (tp->link_config.duplex == DUPLEX_FULL)
2998 new_adv |= ADVERTISE_10FULL;
3000 new_adv |= ADVERTISE_10HALF;
3002 tg3_writephy(tp, MII_ADVERTISE, new_adv);
3007 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
/* EEE setup: disable LPI while reconfiguring, tweak per-ASIC DSP
 * registers, then advertise EEE over the clause-45 AN MMD.
 */
3010 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
3013 tw32(TG3_CPMU_EEE_MODE,
3014 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
3016 TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
3018 switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
3020 case ASIC_REV_57765:
3021 if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
3022 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
3023 MII_TG3_DSP_CH34TP2_HIBW01);
3026 val = MII_TG3_DSP_TAP26_ALNOKO |
3027 MII_TG3_DSP_TAP26_RMRXSTO |
3028 MII_TG3_DSP_TAP26_OPCSINPT;
3029 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
3033 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3034 /* Advertise 100-BaseTX EEE ability */
3035 if (tp->link_config.advertising &
3036 ADVERTISED_100baseT_Full)
3037 val |= MDIO_AN_EEE_ADV_100TX;
3038 /* Advertise 1000-BaseT EEE ability */
3039 if (tp->link_config.advertising &
3040 ADVERTISED_1000baseT_Full)
3041 val |= MDIO_AN_EEE_ADV_1000T;
3043 tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
3045 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
/* Forced-mode path: write BMCR directly, after dropping the old link. */
3048 if (tp->link_config.autoneg == AUTONEG_DISABLE &&
3049 tp->link_config.speed != SPEED_INVALID) {
3050 u32 bmcr, orig_bmcr;
3052 tp->link_config.active_speed = tp->link_config.speed;
3053 tp->link_config.active_duplex = tp->link_config.duplex;
3056 switch (tp->link_config.speed) {
3062 bmcr |= BMCR_SPEED100;
3066 bmcr |= TG3_BMCR_SPEED1000;
3070 if (tp->link_config.duplex == DUPLEX_FULL)
3071 bmcr |= BMCR_FULLDPLX;
3073 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
3074 (bmcr != orig_bmcr)) {
/* Park the PHY in loopback and wait for link-down before
 * forcing the new mode (double BMSR read latches status).
 */
3075 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
3076 for (i = 0; i < 1500; i++) {
3080 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
3081 tg3_readphy(tp, MII_BMSR, &tmp))
3083 if (!(tmp & BMSR_LSTATUS)) {
3088 tg3_writephy(tp, MII_BMCR, bmcr);
3092 tg3_writephy(tp, MII_BMCR,
3093 BMCR_ANENABLE | BMCR_ANRESTART);
/* Apply the BCM5401 DSP workaround sequence: disable tap power
 * management, set the extended-packet-length bit, and load the
 * vendor-specified DSP coefficient values.  Returns the OR of the write
 * results (the final return is among the lines missing from this view).
 */
3097 static int tg3_init_5401phy_dsp(struct tg3 *tp)
3101 /* Turn off tap power management. */
3102 /* Set Extended packet length bit */
3103 err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
/* Magic register/value pairs from the Broadcom 5401 errata sequence. */
3105 err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
3106 err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
3107 err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
3108 err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
3109 err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
/* Check whether the PHY's advertisement registers already contain every
 * mode requested in `mask` (ethtool ADVERTISED_* bits): MII_ADVERTISE is
 * compared for the 10/100 modes and, unless the PHY is 10/100-only,
 * MII_TG3_CTRL for the gigabit modes.  Non-zero means "advertising all";
 * the explicit return statements are among the lines missing from this
 * view.
 */
3116 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
3118 u32 adv_reg, all_mask = 0;
3120 if (mask & ADVERTISED_10baseT_Half)
3121 all_mask |= ADVERTISE_10HALF;
3122 if (mask & ADVERTISED_10baseT_Full)
3123 all_mask |= ADVERTISE_10FULL;
3124 if (mask & ADVERTISED_100baseT_Half)
3125 all_mask |= ADVERTISE_100HALF;
3126 if (mask & ADVERTISED_100baseT_Full)
3127 all_mask |= ADVERTISE_100FULL;
3129 if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
3132 if ((adv_reg & all_mask) != all_mask)
/* Gigabit modes live in MII_TG3_CTRL, checked separately. */
3134 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3138 if (mask & ADVERTISED_1000baseT_Half)
3139 all_mask |= ADVERTISE_1000HALF;
3140 if (mask & ADVERTISED_1000baseT_Full)
3141 all_mask |= ADVERTISE_1000FULL;
3143 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
3146 if ((tg3_ctrl & all_mask) != all_mask)
/* Verify that the pause bits currently advertised in MII_ADVERTISE match
 * what link_config.flowctrl requires.  On full duplex with matching
 * advertisement, also fetch the link partner's abilities into *rmtadv
 * when pause autonegotiation is enabled.  When the advertisement is
 * stale, rewrite it so the next renegotiation is already correct.
 * Returns whether the current advertisement was acceptable (return
 * statements are among the lines missing from this view).
 */
3152 static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
3156 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
3159 curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
3160 reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
3162 if (tp->link_config.active_duplex == DUPLEX_FULL) {
3163 if (curadv != reqadv)
3166 if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)
3167 tg3_readphy(tp, MII_LPA, rmtadv);
3169 /* Reprogram the advertisement register, even if it
3170 * does not affect the current link. If the link
3171 * gets renegotiated in the future, we can save an
3172 * additional renegotiation cycle by advertising
3173 * it correctly in the first place.
3175 if (curadv != reqadv) {
3176 *lcladv &= ~(ADVERTISE_PAUSE_CAP |
3177 ADVERTISE_PAUSE_ASYM);
3178 tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
/* Main copper-PHY link bring-up / status resolution routine:
 *
 *  1. clear latched MAC status bits and stop MI auto-polling while we
 *     talk to the PHY directly;
 *  2. apply per-chip PHY errata (5703/04/05 reset on link loss, BCM5401
 *     DSP reload, 5701 A0/B0 CRC workaround);
 *  3. clear pending PHY interrupts and program the interrupt mask;
 *  4. read BMSR (twice, to latch) and, if link is up, resolve speed and
 *     duplex from AUX_STAT and validate the negotiated or forced mode;
 *  5. restart negotiation via tg3_phy_copper_begin() when there is no
 *     acceptable link;
 *  6. reflect the result into MAC_MODE (port mode, half duplex, 5700
 *     link polarity), EEE state and MAC_EVENT config;
 *  7. apply the 5700 gigabit PCI-X workaround, toggle PCIe CLKREQ for
 *     the send-BD-corruption erratum, and report carrier changes.
 *
 * NOTE(review): this extraction is missing many interior lines (local
 * declarations, tw32 targets, force_reset handling, udelay()s, breaks and
 * the final return); annotations describe only the visible lines.
 */
3185 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
3187 int current_link_up;
3189 u32 lcl_adv, rmt_adv;
3197 (MAC_STATUS_SYNC_CHANGED |
3198 MAC_STATUS_CFG_CHANGED |
3199 MAC_STATUS_MI_COMPLETION |
3200 MAC_STATUS_LNKSTATE_CHANGED));
/* Disable MI auto-polling for the duration of direct PHY access. */
3203 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
3205 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL);
3209 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
3211 /* Some third-party PHYs need to be reset on link going
3214 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3215 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
3216 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
3217 netif_carrier_ok(tp->dev)) {
3218 tg3_readphy(tp, MII_BMSR, &bmsr);
3219 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3220 !(bmsr & BMSR_LSTATUS))
/* BCM5401 needs its DSP coefficients reloaded when link is down. */
3226 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
3227 tg3_readphy(tp, MII_BMSR, &bmsr);
3228 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
3229 !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
3232 if (!(bmsr & BMSR_LSTATUS)) {
3233 err = tg3_init_5401phy_dsp(tp);
3237 tg3_readphy(tp, MII_BMSR, &bmsr);
3238 for (i = 0; i < 1000; i++) {
3240 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3241 (bmsr & BMSR_LSTATUS)) {
3247 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
3248 TG3_PHY_REV_BCM5401_B0 &&
3249 !(bmsr & BMSR_LSTATUS) &&
3250 tp->link_config.active_speed == SPEED_1000) {
3251 err = tg3_phy_reset(tp);
3253 err = tg3_init_5401phy_dsp(tp);
3258 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3259 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
3260 /* 5701 {A0,B0} CRC bug workaround */
3261 tg3_writephy(tp, 0x15, 0x0a75);
3262 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3263 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
3264 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3267 /* Clear pending interrupts... */
3268 tg3_readphy(tp, MII_TG3_ISTAT, &val);
3269 tg3_readphy(tp, MII_TG3_ISTAT, &val);
3271 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
3272 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
3273 else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
3274 tg3_writephy(tp, MII_TG3_IMASK, ~0);
3276 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3277 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3278 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
3279 tg3_writephy(tp, MII_TG3_EXT_CTRL,
3280 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
3282 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
3285 current_link_up = 0;
3286 current_speed = SPEED_INVALID;
3287 current_duplex = DUPLEX_INVALID;
3289 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
3290 err = tg3_phy_auxctl_read(tp,
3291 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
3293 if (!err && !(val & (1 << 10))) {
3294 tg3_phy_auxctl_write(tp,
3295 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
/* BMSR latches link-down events: read twice, poll for link. */
3302 for (i = 0; i < 100; i++) {
3303 tg3_readphy(tp, MII_BMSR, &bmsr);
3304 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3305 (bmsr & BMSR_LSTATUS))
3310 if (bmsr & BMSR_LSTATUS) {
3313 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
3314 for (i = 0; i < 2000; i++) {
3316 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
3321 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
/* BMCR may transiently read 0 or 0x7fff right after changes. */
3326 for (i = 0; i < 200; i++) {
3327 tg3_readphy(tp, MII_BMCR, &bmcr);
3328 if (tg3_readphy(tp, MII_BMCR, &bmcr))
3330 if (bmcr && bmcr != 0x7fff)
3338 tp->link_config.active_speed = current_speed;
3339 tp->link_config.active_duplex = current_duplex;
/* Link counts only if the negotiated/forced mode matches config. */
3341 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3342 if ((bmcr & BMCR_ANENABLE) &&
3343 tg3_copper_is_advertising_all(tp,
3344 tp->link_config.advertising)) {
3345 if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
3347 current_link_up = 1;
3350 if (!(bmcr & BMCR_ANENABLE) &&
3351 tp->link_config.speed == current_speed &&
3352 tp->link_config.duplex == current_duplex &&
3353 tp->link_config.flowctrl ==
3354 tp->link_config.active_flowctrl) {
3355 current_link_up = 1;
3359 if (current_link_up == 1 &&
3360 tp->link_config.active_duplex == DUPLEX_FULL)
3361 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
3365 if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3366 tg3_phy_copper_begin(tp);
3368 tg3_readphy(tp, MII_BMSR, &bmsr);
3369 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3370 (bmsr & BMSR_LSTATUS))
3371 current_link_up = 1;
/* Reflect the resolved link into the MAC's port mode and duplex. */
3374 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
3375 if (current_link_up == 1) {
3376 if (tp->link_config.active_speed == SPEED_100 ||
3377 tp->link_config.active_speed == SPEED_10)
3378 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3380 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3381 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
3382 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3384 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3386 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
3387 if (tp->link_config.active_duplex == DUPLEX_HALF)
3388 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
3390 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
3391 if (current_link_up == 1 &&
3392 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
3393 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
3395 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
3398 /* ??? Without this setting Netgear GA302T PHY does not
3399 * ??? send/receive packets...
3401 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
3402 tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
3403 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
3404 tw32_f(MAC_MI_MODE, tp->mi_mode);
3408 tw32_f(MAC_MODE, tp->mac_mode);
3411 tg3_phy_eee_adjust(tp, current_link_up);
3413 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
3414 /* Polled via timer. */
3415 tw32_f(MAC_EVENT, 0);
3417 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
/* 5700 gigabit-on-PCIX workaround: notify firmware via mailbox. */
3421 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
3422 current_link_up == 1 &&
3423 tp->link_config.active_speed == SPEED_1000 &&
3424 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
3425 (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
3428 (MAC_STATUS_SYNC_CHANGED |
3429 MAC_STATUS_CFG_CHANGED));
3432 NIC_SRAM_FIRMWARE_MBOX,
3433 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
3436 /* Prevent send BD corruption. */
3437 if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG) {
3438 u16 oldlnkctl, newlnkctl;
3440 pci_read_config_word(tp->pdev,
3441 tp->pcie_cap + PCI_EXP_LNKCTL,
3443 if (tp->link_config.active_speed == SPEED_100 ||
3444 tp->link_config.active_speed == SPEED_10)
3445 newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
3447 newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
3448 if (newlnkctl != oldlnkctl)
3449 pci_write_config_word(tp->pdev,
3450 tp->pcie_cap + PCI_EXP_LNKCTL,
3454 if (current_link_up != netif_carrier_ok(tp->dev)) {
3455 if (current_link_up)
3456 netif_carrier_on(tp->dev);
3458 netif_carrier_off(tp->dev);
3459 tg3_link_report(tp);
/* Software state for the fiber (1000BASE-X style) autonegotiation state
 * machine implemented in tg3_fiber_aneg_smachine() below:
 *  - ANEG_STATE_*: state-machine states;
 *  - MR_*: management-register-style status/control flags (flags field);
 *  - ANEG_CFG_*: bit layout of the tx/rx config words exchanged on the
 *    wire (note PS1/HD/FD sit in the high half -- the words are stored
 *    byte-swapped relative to the IEEE ordering);
 *  - timing fields track when the current state was entered.
 * Several struct fields and the ANEG_DONE/ANEG_OK result codes are among
 * the lines missing from this extraction.
 */
3465 struct tg3_fiber_aneginfo {
3467 #define ANEG_STATE_UNKNOWN 0
3468 #define ANEG_STATE_AN_ENABLE 1
3469 #define ANEG_STATE_RESTART_INIT 2
3470 #define ANEG_STATE_RESTART 3
3471 #define ANEG_STATE_DISABLE_LINK_OK 4
3472 #define ANEG_STATE_ABILITY_DETECT_INIT 5
3473 #define ANEG_STATE_ABILITY_DETECT 6
3474 #define ANEG_STATE_ACK_DETECT_INIT 7
3475 #define ANEG_STATE_ACK_DETECT 8
3476 #define ANEG_STATE_COMPLETE_ACK_INIT 9
3477 #define ANEG_STATE_COMPLETE_ACK 10
3478 #define ANEG_STATE_IDLE_DETECT_INIT 11
3479 #define ANEG_STATE_IDLE_DETECT 12
3480 #define ANEG_STATE_LINK_OK 13
3481 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
3482 #define ANEG_STATE_NEXT_PAGE_WAIT 15
3485 #define MR_AN_ENABLE 0x00000001
3486 #define MR_RESTART_AN 0x00000002
3487 #define MR_AN_COMPLETE 0x00000004
3488 #define MR_PAGE_RX 0x00000008
3489 #define MR_NP_LOADED 0x00000010
3490 #define MR_TOGGLE_TX 0x00000020
3491 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
3492 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
3493 #define MR_LP_ADV_SYM_PAUSE 0x00000100
3494 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
3495 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
3496 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
3497 #define MR_LP_ADV_NEXT_PAGE 0x00001000
3498 #define MR_TOGGLE_RX 0x00002000
3499 #define MR_NP_RX 0x00004000
3501 #define MR_LINK_OK 0x80000000
3503 unsigned long link_time, cur_time;
3505 u32 ability_match_cfg;
3506 int ability_match_count;
3508 char ability_match, idle_match, ack_match;
3510 u32 txconfig, rxconfig;
3511 #define ANEG_CFG_NP 0x00000080
3512 #define ANEG_CFG_ACK 0x00000040
3513 #define ANEG_CFG_RF2 0x00000020
3514 #define ANEG_CFG_RF1 0x00000010
3515 #define ANEG_CFG_PS2 0x00000001
3516 #define ANEG_CFG_PS1 0x00008000
3517 #define ANEG_CFG_HD 0x00004000
3518 #define ANEG_CFG_FD 0x00002000
3519 #define ANEG_CFG_INVAL 0x00001f06
3524 #define ANEG_TIMER_ENAB 2
3525 #define ANEG_FAILED -1
3527 #define ANEG_STATE_SETTLE_TIME 10000
/* One step of the software fiber autonegotiation state machine (modeled
 * on the IEEE 802.3 Clause 37 arbitration process).  The caller invokes
 * this repeatedly (see fiber_autoneg()); each call samples the received
 * config word from MAC_RX_AUTO_NEG, updates the ability/ack/idle match
 * trackers, then runs one transition of the switch below.  Returns a
 * status code (ANEG_DONE / ANEG_TIMER_ENAB / ANEG_FAILED / ANEG_OK --
 * most return-value assignments are among the lines missing from this
 * extraction, as are many break statements and closing braces).
 */
3529 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
3530 struct tg3_fiber_aneginfo *ap)
3533 unsigned long delta;
3537 if (ap->state == ANEG_STATE_UNKNOWN) {
3541 ap->ability_match_cfg = 0;
3542 ap->ability_match_count = 0;
3543 ap->ability_match = 0;
/* Sample the incoming config word; "ability match" requires seeing
 * the same non-zero word on more than one consecutive sample.
 */
3549 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
3550 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
3552 if (rx_cfg_reg != ap->ability_match_cfg) {
3553 ap->ability_match_cfg = rx_cfg_reg;
3554 ap->ability_match = 0;
3555 ap->ability_match_count = 0;
3557 if (++ap->ability_match_count > 1) {
3558 ap->ability_match = 1;
3559 ap->ability_match_cfg = rx_cfg_reg;
3562 if (rx_cfg_reg & ANEG_CFG_ACK)
3570 ap->ability_match_cfg = 0;
3571 ap->ability_match_count = 0;
3572 ap->ability_match = 0;
3578 ap->rxconfig = rx_cfg_reg;
3581 switch (ap->state) {
3582 case ANEG_STATE_UNKNOWN:
3583 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
3584 ap->state = ANEG_STATE_AN_ENABLE;
3587 case ANEG_STATE_AN_ENABLE:
3588 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
3589 if (ap->flags & MR_AN_ENABLE) {
3592 ap->ability_match_cfg = 0;
3593 ap->ability_match_count = 0;
3594 ap->ability_match = 0;
3598 ap->state = ANEG_STATE_RESTART_INIT;
3600 ap->state = ANEG_STATE_DISABLE_LINK_OK;
/* Restart: stop sending a config word, enable SEND_CONFIGS, and
 * let the link settle before ability detection.
 */
3604 case ANEG_STATE_RESTART_INIT:
3605 ap->link_time = ap->cur_time;
3606 ap->flags &= ~(MR_NP_LOADED);
3608 tw32(MAC_TX_AUTO_NEG, 0);
3609 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3610 tw32_f(MAC_MODE, tp->mac_mode);
3613 ret = ANEG_TIMER_ENAB;
3614 ap->state = ANEG_STATE_RESTART;
3617 case ANEG_STATE_RESTART:
3618 delta = ap->cur_time - ap->link_time;
3619 if (delta > ANEG_STATE_SETTLE_TIME)
3620 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
3622 ret = ANEG_TIMER_ENAB;
3625 case ANEG_STATE_DISABLE_LINK_OK:
/* Ability detect: transmit our capabilities (full duplex plus the
 * pause bits derived from link_config.flowctrl).
 */
3629 case ANEG_STATE_ABILITY_DETECT_INIT:
3630 ap->flags &= ~(MR_TOGGLE_TX);
3631 ap->txconfig = ANEG_CFG_FD;
3632 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3633 if (flowctrl & ADVERTISE_1000XPAUSE)
3634 ap->txconfig |= ANEG_CFG_PS1;
3635 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3636 ap->txconfig |= ANEG_CFG_PS2;
3637 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3638 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3639 tw32_f(MAC_MODE, tp->mac_mode);
3642 ap->state = ANEG_STATE_ABILITY_DETECT;
3645 case ANEG_STATE_ABILITY_DETECT:
3646 if (ap->ability_match != 0 && ap->rxconfig != 0)
3647 ap->state = ANEG_STATE_ACK_DETECT_INIT;
/* Ack detect: echo the partner's word back with the ACK bit set. */
3650 case ANEG_STATE_ACK_DETECT_INIT:
3651 ap->txconfig |= ANEG_CFG_ACK;
3652 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3653 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3654 tw32_f(MAC_MODE, tp->mac_mode);
3657 ap->state = ANEG_STATE_ACK_DETECT;
3660 case ANEG_STATE_ACK_DETECT:
3661 if (ap->ack_match != 0) {
3662 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
3663 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
3664 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
3666 ap->state = ANEG_STATE_AN_ENABLE;
3668 } else if (ap->ability_match != 0 &&
3669 ap->rxconfig == 0) {
3670 ap->state = ANEG_STATE_AN_ENABLE;
/* Complete-ack: decode the link partner's advertised abilities
 * from the received config word into MR_LP_ADV_* flags.
 */
3674 case ANEG_STATE_COMPLETE_ACK_INIT:
3675 if (ap->rxconfig & ANEG_CFG_INVAL) {
3679 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
3680 MR_LP_ADV_HALF_DUPLEX |
3681 MR_LP_ADV_SYM_PAUSE |
3682 MR_LP_ADV_ASYM_PAUSE |
3683 MR_LP_ADV_REMOTE_FAULT1 |
3684 MR_LP_ADV_REMOTE_FAULT2 |
3685 MR_LP_ADV_NEXT_PAGE |
3688 if (ap->rxconfig & ANEG_CFG_FD)
3689 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
3690 if (ap->rxconfig & ANEG_CFG_HD)
3691 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
3692 if (ap->rxconfig & ANEG_CFG_PS1)
3693 ap->flags |= MR_LP_ADV_SYM_PAUSE;
3694 if (ap->rxconfig & ANEG_CFG_PS2)
3695 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
3696 if (ap->rxconfig & ANEG_CFG_RF1)
3697 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
3698 if (ap->rxconfig & ANEG_CFG_RF2)
3699 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
3700 if (ap->rxconfig & ANEG_CFG_NP)
3701 ap->flags |= MR_LP_ADV_NEXT_PAGE;
3703 ap->link_time = ap->cur_time;
3705 ap->flags ^= (MR_TOGGLE_TX);
3706 if (ap->rxconfig & 0x0008)
3707 ap->flags |= MR_TOGGLE_RX;
3708 if (ap->rxconfig & ANEG_CFG_NP)
3709 ap->flags |= MR_NP_RX;
3710 ap->flags |= MR_PAGE_RX;
3712 ap->state = ANEG_STATE_COMPLETE_ACK;
3713 ret = ANEG_TIMER_ENAB;
3716 case ANEG_STATE_COMPLETE_ACK:
3717 if (ap->ability_match != 0 &&
3718 ap->rxconfig == 0) {
3719 ap->state = ANEG_STATE_AN_ENABLE;
3722 delta = ap->cur_time - ap->link_time;
3723 if (delta > ANEG_STATE_SETTLE_TIME) {
3724 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
3725 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3727 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
3728 !(ap->flags & MR_NP_RX)) {
3729 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
/* Idle detect: stop sending configs and wait for idle on the wire. */
3737 case ANEG_STATE_IDLE_DETECT_INIT:
3738 ap->link_time = ap->cur_time;
3739 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3740 tw32_f(MAC_MODE, tp->mac_mode);
3743 ap->state = ANEG_STATE_IDLE_DETECT;
3744 ret = ANEG_TIMER_ENAB;
3747 case ANEG_STATE_IDLE_DETECT:
3748 if (ap->ability_match != 0 &&
3749 ap->rxconfig == 0) {
3750 ap->state = ANEG_STATE_AN_ENABLE;
3753 delta = ap->cur_time - ap->link_time;
3754 if (delta > ANEG_STATE_SETTLE_TIME) {
3755 /* XXX another gem from the Broadcom driver :( */
3756 ap->state = ANEG_STATE_LINK_OK;
3760 case ANEG_STATE_LINK_OK:
3761 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
3765 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
3766 /* ??? unimplemented */
3769 case ANEG_STATE_NEXT_PAGE_WAIT:
3770 /* ??? unimplemented */
/* Drive the software fiber autonegotiation state machine to completion:
 * prime the MAC (GMII port mode, SEND_CONFIGS), run
 * tg3_fiber_aneg_smachine() in a bounded loop until it reports
 * ANEG_DONE or ANEG_FAILED, then stop sending configs and return the
 * negotiated tx/rx config words to the caller.  Success requires
 * AN complete + link OK + full-duplex partner (the return statements
 * are among the lines missing from this view).
 */
3781 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
3784 struct tg3_fiber_aneginfo aninfo;
3785 int status = ANEG_FAILED;
3789 tw32_f(MAC_TX_AUTO_NEG, 0);
3791 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
3792 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
3795 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
3798 memset(&aninfo, 0, sizeof(aninfo));
3799 aninfo.flags |= MR_AN_ENABLE;
3800 aninfo.state = ANEG_STATE_UNKNOWN;
3801 aninfo.cur_time = 0;
/* Bounded iteration count guards against a wedged state machine. */
3803 while (++tick < 195000) {
3804 status = tg3_fiber_aneg_smachine(tp, &aninfo);
3805 if (status == ANEG_DONE || status == ANEG_FAILED)
3811 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3812 tw32_f(MAC_MODE, tp->mac_mode);
3815 *txflags = aninfo.txconfig;
3816 *rxflags = aninfo.flags;
3818 if (status == ANEG_DONE &&
3819 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
3820 MR_LP_ADV_FULL_DUPLEX)))
/* Initialize the external BCM8002 SerDes PHY via a vendor-specified
 * register sequence: reset, set the PLL lock range, configure the PMA
 * channel registers, pulse POR, and finally deselect the channel
 * register window.  Skipped when the driver is already initialized and
 * the PCS reports sync (an early return is among the missing lines).
 * The 0x10-0x18 register numbers and values are opaque vendor constants.
 */
3826 static void tg3_init_bcm8002(struct tg3 *tp)
3828 u32 mac_status = tr32(MAC_STATUS);
3831 /* Reset when initting first time or we have a link. */
3832 if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
3833 !(mac_status & MAC_STATUS_PCS_SYNCED))
3836 /* Set PLL lock range. */
3837 tg3_writephy(tp, 0x16, 0x8007);
3840 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
3842 /* Wait for reset to complete. */
3843 /* XXX schedule_timeout() ... */
3844 for (i = 0; i < 500; i++)
3847 /* Config mode; select PMA/Ch 1 regs. */
3848 tg3_writephy(tp, 0x10, 0x8411);
3850 /* Enable auto-lock and comdet, select txclk for tx. */
3851 tg3_writephy(tp, 0x11, 0x0a10);
3853 tg3_writephy(tp, 0x18, 0x00a0);
3854 tg3_writephy(tp, 0x16, 0x41ff);
3856 /* Assert and deassert POR. */
3857 tg3_writephy(tp, 0x13, 0x0400);
3859 tg3_writephy(tp, 0x13, 0x0000);
3861 tg3_writephy(tp, 0x11, 0x0a50);
3863 tg3_writephy(tp, 0x11, 0x0a10);
3865 /* Wait for signal to stabilize */
3866 /* XXX schedule_timeout() ... */
3867 for (i = 0; i < 15000; i++)
3870 /* Deselect the channel register so we can read the PHYID
3873 tg3_writephy(tp, 0x10, 0x8011);
/* Fiber link setup using the chip's hardware SG_DIG autonegotiation
 * engine.  Programs SG_DIG_CTRL according to tp->link_config (forced
 * vs. autoneg, pause advertisement), then interprets SG_DIG_STATUS /
 * MAC_STATUS to decide link state.  Also supports parallel detection:
 * link is declared up when PCS is synced but no config code words are
 * being received.  Returns nonzero when link is up.
 * NOTE(review): listing is elided — several locals, braces, udelay
 * calls and else-branches are missing from view.
 */
3876 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
3879 u32 sg_dig_ctrl, sg_dig_status;
3880 u32 serdes_cfg, expected_sg_dig_ctrl;
3881 int workaround, port_a;
3882 int current_link_up;
3885 expected_sg_dig_ctrl = 0;
3888 current_link_up = 0;
/* The SERDES_CFG workaround is needed on all revisions except
 * 5704 A0/A1; port A vs. B is read from DUAL_MAC_CTRL. */
3890 if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
3891 tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
3893 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
3896 /* preserve bits 0-11,13,14 for signal pre-emphasis */
3897 /* preserve bits 20-23 for voltage regulator */
3898 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
3901 sg_dig_ctrl = tr32(SG_DIG_CTRL);
/* Forced-mode path: disable HW autoneg if it was on, and declare
 * link up on PCS sync alone (no flow control negotiated). */
3903 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
3904 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
3906 u32 val = serdes_cfg;
3912 tw32_f(MAC_SERDES_CFG, val);
3915 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
3917 if (mac_status & MAC_STATUS_PCS_SYNCED) {
3918 tg3_setup_flow_control(tp, 0, 0);
3919 current_link_up = 1;
3924 /* Want auto-negotiation. */
3925 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
/* Fold the requested pause configuration into the control word. */
3927 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3928 if (flowctrl & ADVERTISE_1000XPAUSE)
3929 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
3930 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3931 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
/* Control word needs (re)programming: but if we are mid parallel
 * detect with PCS synced and no config words arriving, keep link up
 * and just count down instead of restarting autoneg. */
3933 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
3934 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
3935 tp->serdes_counter &&
3936 ((mac_status & (MAC_STATUS_PCS_SYNCED |
3937 MAC_STATUS_RCVD_CFG)) ==
3938 MAC_STATUS_PCS_SYNCED)) {
3939 tp->serdes_counter--;
3940 current_link_up = 1;
/* Restart HW autoneg: pulse soft reset around the new control word. */
3945 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
3946 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
3948 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
3950 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3951 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
3952 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
3953 MAC_STATUS_SIGNAL_DET)) {
3954 sg_dig_status = tr32(SG_DIG_STATUS);
3955 mac_status = tr32(MAC_STATUS);
/* Autoneg completed: derive local/remote pause advertisement and
 * program flow control accordingly. */
3957 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
3958 (mac_status & MAC_STATUS_PCS_SYNCED)) {
3959 u32 local_adv = 0, remote_adv = 0;
3961 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
3962 local_adv |= ADVERTISE_1000XPAUSE;
3963 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
3964 local_adv |= ADVERTISE_1000XPSE_ASYM;
3966 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
3967 remote_adv |= LPA_1000XPAUSE;
3968 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
3969 remote_adv |= LPA_1000XPAUSE_ASYM;
3971 tg3_setup_flow_control(tp, local_adv, remote_adv);
3972 current_link_up = 1;
3973 tp->serdes_counter = 0;
3974 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
/* Autoneg not complete: count down the autoneg timeout; when it
 * expires, fall back to parallel detection. */
3975 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
3976 if (tp->serdes_counter)
3977 tp->serdes_counter--;
3980 u32 val = serdes_cfg;
3987 tw32_f(MAC_SERDES_CFG, val);
3990 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
3993 /* Link parallel detection - link is up */
3994 /* only if we have PCS_SYNC and not */
3995 /* receiving config code words */
3996 mac_status = tr32(MAC_STATUS);
3997 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
3998 !(mac_status & MAC_STATUS_RCVD_CFG)) {
3999 tg3_setup_flow_control(tp, 0, 0);
4000 current_link_up = 1;
4002 TG3_PHYFLG_PARALLEL_DETECT;
4003 tp->serdes_counter =
4004 SERDES_PARALLEL_DET_TIMEOUT;
4006 goto restart_autoneg;
/* No sync/signal at all: rearm the autoneg timeout. */
4010 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4011 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4015 return current_link_up;
/* Fiber link setup using the software autoneg state machine (via
 * fiber_autoneg()) instead of the hardware SG_DIG engine.  Requires
 * PCS sync as a precondition.  With autoneg disabled, forces a
 * 1000-full link.  Returns nonzero when link is up.
 * NOTE(review): listing is elided — early return, udelay calls and
 * some braces are missing from view.
 */
4018 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
4020 int current_link_up = 0;
/* No PCS sync -> no link; nothing to negotiate. */
4022 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
4025 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4026 u32 txflags, rxflags;
/* Run software autoneg; on success translate the tx config word and
 * partner flags into 1000BASE-X pause advertisements. */
4029 if (fiber_autoneg(tp, &txflags, &rxflags)) {
4030 u32 local_adv = 0, remote_adv = 0;
4032 if (txflags & ANEG_CFG_PS1)
4033 local_adv |= ADVERTISE_1000XPAUSE;
4034 if (txflags & ANEG_CFG_PS2)
4035 local_adv |= ADVERTISE_1000XPSE_ASYM;
4037 if (rxflags & MR_LP_ADV_SYM_PAUSE)
4038 remote_adv |= LPA_1000XPAUSE;
4039 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
4040 remote_adv |= LPA_1000XPAUSE_ASYM;
4042 tg3_setup_flow_control(tp, local_adv, remote_adv);
4044 current_link_up = 1;
/* Let the sync/config-changed status bits settle (bounded retries). */
4046 for (i = 0; i < 30; i++) {
4049 (MAC_STATUS_SYNC_CHANGED |
4050 MAC_STATUS_CFG_CHANGED));
4052 if ((tr32(MAC_STATUS) &
4053 (MAC_STATUS_SYNC_CHANGED |
4054 MAC_STATUS_CFG_CHANGED)) == 0)
/* Autoneg failed but PCS is synced and no config words are being
 * received: treat as link up (parallel-detect style). */
4058 mac_status = tr32(MAC_STATUS);
4059 if (current_link_up == 0 &&
4060 (mac_status & MAC_STATUS_PCS_SYNCED) &&
4061 !(mac_status & MAC_STATUS_RCVD_CFG))
4062 current_link_up = 1;
/* Autoneg disabled: no pause, force the link up. */
4064 tg3_setup_flow_control(tp, 0, 0);
4066 /* Forcing 1000FD link up. */
4067 current_link_up = 1;
/* Pulse SEND_CONFIGS to flush link state, then restore mac_mode. */
4069 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
4072 tw32_f(MAC_MODE, tp->mac_mode);
4077 return current_link_up;
/* Top-level link setup for TBI (fiber) ports.  Saves the current
 * speed/duplex/pause config, picks the hardware or by-hand autoneg
 * path based on TG3_FLG2_HW_AUTONEG, updates carrier state, LEDs and
 * the active link parameters, and reports link changes via
 * tg3_link_report().  Returns 0 (err is elided from this view).
 * NOTE(review): listing is elided — some locals, udelay calls, braces
 * and the final return are missing from view.
 */
4080 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
4083 u16 orig_active_speed;
4084 u8 orig_active_duplex;
4086 int current_link_up;
/* Remember current settings so we can detect a config-only change
 * (same carrier state, different speed/duplex/pause) at the end. */
4089 orig_pause_cfg = tp->link_config.active_flowctrl;
4090 orig_active_speed = tp->link_config.active_speed;
4091 orig_active_duplex = tp->link_config.active_duplex;
/* Fast path: software autoneg, carrier already up, init complete, and
 * MAC status shows a clean synced link with no pending changes —
 * just ack the changed bits and keep the link as-is. */
4093 if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
4094 netif_carrier_ok(tp->dev) &&
4095 (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
4096 mac_status = tr32(MAC_STATUS);
4097 mac_status &= (MAC_STATUS_PCS_SYNCED |
4098 MAC_STATUS_SIGNAL_DET |
4099 MAC_STATUS_CFG_CHANGED |
4100 MAC_STATUS_RCVD_CFG);
4101 if (mac_status == (MAC_STATUS_PCS_SYNCED |
4102 MAC_STATUS_SIGNAL_DET)) {
4103 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4104 MAC_STATUS_CFG_CHANGED));
/* Full setup: clear the autoneg word and force TBI port mode. */
4109 tw32_f(MAC_TX_AUTO_NEG, 0);
4111 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
4112 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
4113 tw32_f(MAC_MODE, tp->mac_mode);
/* BCM8002 needs its vendor init sequence before link setup. */
4116 if (tp->phy_id == TG3_PHY_ID_BCM8002)
4117 tg3_init_bcm8002(tp);
4119 /* Enable link change event even when serdes polling. */
4120 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4123 current_link_up = 0;
4124 mac_status = tr32(MAC_STATUS);
4126 if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
4127 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
4129 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
/* Clear the link-change bit in the shared status block so the next
 * interrupt reflects fresh state. */
4131 tp->napi[0].hw_status->status =
4132 (SD_STATUS_UPDATED |
4133 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
/* Ack sync/config/link-state changed bits until they stay clear. */
4135 for (i = 0; i < 100; i++) {
4136 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4137 MAC_STATUS_CFG_CHANGED));
4139 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
4140 MAC_STATUS_CFG_CHANGED |
4141 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
/* Lost PCS sync: link is down.  If autoneg timed out, pulse
 * SEND_CONFIGS to restart negotiation with the partner. */
4145 mac_status = tr32(MAC_STATUS);
4146 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
4147 current_link_up = 0;
4148 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
4149 tp->serdes_counter == 0) {
4150 tw32_f(MAC_MODE, (tp->mac_mode |
4151 MAC_MODE_SEND_CONFIGS));
4153 tw32_f(MAC_MODE, tp->mac_mode);
/* TBI is always 1000-full when up; drive the link LED to match. */
4157 if (current_link_up == 1) {
4158 tp->link_config.active_speed = SPEED_1000;
4159 tp->link_config.active_duplex = DUPLEX_FULL;
4160 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4161 LED_CTRL_LNKLED_OVERRIDE |
4162 LED_CTRL_1000MBPS_ON));
4164 tp->link_config.active_speed = SPEED_INVALID;
4165 tp->link_config.active_duplex = DUPLEX_INVALID;
4166 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4167 LED_CTRL_LNKLED_OVERRIDE |
4168 LED_CTRL_TRAFFIC_OVERRIDE));
/* Report either a carrier transition or a parameter-only change. */
4171 if (current_link_up != netif_carrier_ok(tp->dev)) {
4172 if (current_link_up)
4173 netif_carrier_on(tp->dev);
4175 netif_carrier_off(tp->dev);
4176 tg3_link_report(tp);
4178 u32 now_pause_cfg = tp->link_config.active_flowctrl;
4179 if (orig_pause_cfg != now_pause_cfg ||
4180 orig_active_speed != tp->link_config.active_speed ||
4181 orig_active_duplex != tp->link_config.active_duplex)
4182 tg3_link_report(tp);
/* Link setup for fiber ports driven through an MII-accessible SerDes
 * PHY (5714S-style).  Handles three cases: parallel-detect already in
 * progress (leave alone), autoneg (program MII_ADVERTISE and restart),
 * and forced mode (rewrite BMCR, forcing a link-down first if carrier
 * is up).  Derives speed/duplex from BMSR/BMCR/LPA, programs flow
 * control and MAC duplex, and updates carrier state.  Returns the
 * accumulated tg3_readphy/tg3_writephy error status.
 * NOTE(review): listing is elided — locals, udelay calls, braces and
 * the final return are missing from view.
 */
4188 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
4190 int current_link_up, err = 0;
4194 u32 local_adv, remote_adv;
4196 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4197 tw32_f(MAC_MODE, tp->mac_mode);
/* Ack every pending MAC status change before probing the PHY. */
4203 (MAC_STATUS_SYNC_CHANGED |
4204 MAC_STATUS_CFG_CHANGED |
4205 MAC_STATUS_MI_COMPLETION |
4206 MAC_STATUS_LNKSTATE_CHANGED));
4212 current_link_up = 0;
4213 current_speed = SPEED_INVALID;
4214 current_duplex = DUPLEX_INVALID;
/* BMSR link status is latched-low; read twice to get current state.
 * On 5714 the MAC TX status is the authoritative link indicator. */
4216 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4217 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4218 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
4219 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4220 bmsr |= BMSR_LSTATUS;
4222 bmsr &= ~BMSR_LSTATUS;
4225 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
4227 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
4228 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
4229 /* do nothing, just check for link up at the end */
4230 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
/* Build the 1000BASE-X advertisement from the requested flow control
 * and speed settings; restart autoneg only if it actually changed. */
4233 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4234 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
4235 ADVERTISE_1000XPAUSE |
4236 ADVERTISE_1000XPSE_ASYM |
4239 new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4241 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
4242 new_adv |= ADVERTISE_1000XHALF;
4243 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
4244 new_adv |= ADVERTISE_1000XFULL;
4246 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
4247 tg3_writephy(tp, MII_ADVERTISE, new_adv);
4248 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
4249 tg3_writephy(tp, MII_BMCR, bmcr);
4251 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4252 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
4253 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
/* Forced mode: build the desired BMCR (autoneg off, duplex forced). */
4260 bmcr &= ~BMCR_SPEED1000;
4261 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
4263 if (tp->link_config.duplex == DUPLEX_FULL)
4264 new_bmcr |= BMCR_FULLDPLX;
4266 if (new_bmcr != bmcr) {
4267 /* BMCR_SPEED1000 is a reserved bit that needs
4268 * to be set on write.
4270 new_bmcr |= BMCR_SPEED1000;
4272 /* Force a linkdown */
4273 if (netif_carrier_ok(tp->dev)) {
/* Clear the advertisement and restart autoneg so the partner drops
 * the link before we force our new settings. */
4276 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4277 adv &= ~(ADVERTISE_1000XFULL |
4278 ADVERTISE_1000XHALF |
4280 tg3_writephy(tp, MII_ADVERTISE, adv);
4281 tg3_writephy(tp, MII_BMCR, bmcr |
4285 netif_carrier_off(tp->dev);
4287 tg3_writephy(tp, MII_BMCR, new_bmcr);
/* Re-sample link state after the forced reconfiguration. */
4289 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4290 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4291 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
4293 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4294 bmsr |= BMSR_LSTATUS;
4296 bmsr &= ~BMSR_LSTATUS;
4298 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
/* Link is up: duplex comes from BMCR in forced mode, or from the
 * common local/partner advertisement in autoneg mode. */
4302 if (bmsr & BMSR_LSTATUS) {
4303 current_speed = SPEED_1000;
4304 current_link_up = 1;
4305 if (bmcr & BMCR_FULLDPLX)
4306 current_duplex = DUPLEX_FULL;
4308 current_duplex = DUPLEX_HALF;
4313 if (bmcr & BMCR_ANENABLE) {
4316 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
4317 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
4318 common = local_adv & remote_adv;
4319 if (common & (ADVERTISE_1000XHALF |
4320 ADVERTISE_1000XFULL)) {
4321 if (common & ADVERTISE_1000XFULL)
4322 current_duplex = DUPLEX_FULL;
4324 current_duplex = DUPLEX_HALF;
4325 } else if (!(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
4326 /* Link is up via parallel detect */
4328 current_link_up = 0;
/* Flow control only applies on full-duplex links. */
4333 if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
4334 tg3_setup_flow_control(tp, local_adv, remote_adv);
4336 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4337 if (tp->link_config.active_duplex == DUPLEX_HALF)
4338 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4340 tw32_f(MAC_MODE, tp->mac_mode);
4343 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4345 tp->link_config.active_speed = current_speed;
4346 tp->link_config.active_duplex = current_duplex;
/* Propagate carrier transitions to the stack and log them. */
4348 if (current_link_up != netif_carrier_ok(tp->dev)) {
4349 if (current_link_up)
4350 netif_carrier_on(tp->dev);
4352 netif_carrier_off(tp->dev);
4353 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4355 tg3_link_report(tp);
/* Periodic SerDes parallel-detection poll (called from the driver
 * timer).  While autoneg is pending, just counts down serdes_counter.
 * If carrier is down with autoneg enabled and the PHY reports signal
 * detect without incoming config code words, forces a 1000-full link
 * (parallel detect).  Conversely, if a parallel-detected link starts
 * receiving config code words, re-enables autoneg.
 * NOTE(review): listing is elided — locals, early return and braces
 * are missing from view.
 */
4360 static void tg3_serdes_parallel_detect(struct tg3 *tp)
4362 if (tp->serdes_counter) {
4363 /* Give autoneg time to complete. */
4364 tp->serdes_counter--;
4368 if (!netif_carrier_ok(tp->dev) &&
4369 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
4372 tg3_readphy(tp, MII_BMCR, &bmcr);
4373 if (bmcr & BMCR_ANENABLE) {
4376 /* Select shadow register 0x1f */
4377 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
4378 tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
4380 /* Select expansion interrupt status register */
4381 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
4382 MII_TG3_DSP_EXP1_INT_STAT);
/* Double-read: the expansion status is latched. */
4383 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4384 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4386 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
4387 /* We have signal detect and not receiving
4388 * config code words, link is up by parallel
/* Force 1000-full with autoneg off and remember we did so. */
4392 bmcr &= ~BMCR_ANENABLE;
4393 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
4394 tg3_writephy(tp, MII_BMCR, bmcr);
4395 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
4398 } else if (netif_carrier_ok(tp->dev) &&
4399 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
4400 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
4403 /* Select expansion interrupt status register */
4404 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
4405 MII_TG3_DSP_EXP1_INT_STAT);
4406 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4410 /* Config code words received, turn on autoneg. */
4411 tg3_readphy(tp, MII_BMCR, &bmcr);
4412 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
4414 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
/* Dispatch link setup to the right PHY handler (fiber, fiber-MII or
 * copper), then apply post-setup chip fixups: 5784_AX clock prescaler,
 * MAC TX lengths (extended slot time for 1000/half), statistics
 * coalescing ticks gated on carrier, and the ASPM L1-threshold
 * workaround.  Returns the error status from the PHY handler.
 * NOTE(review): listing is elided — 'scale' assignments and some
 * braces are missing from view.
 */
4420 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
4425 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
4426 err = tg3_setup_fiber_phy(tp, force_reset);
4427 else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4428 err = tg3_setup_fiber_mii_phy(tp, force_reset);
4430 err = tg3_setup_copper_phy(tp, force_reset);
/* 5784_AX: pick the GRC prescaler from the current MAC clock rate. */
4432 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
4435 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
4436 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
4438 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
4443 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
4444 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
4445 tw32(GRC_MISC_CFG, val);
/* Base TX lengths: IPG/CRS timings; 5720 preserves its jumbo frame
 * length and countdown fields. */
4448 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
4449 (6 << TX_LENGTHS_IPG_SHIFT);
4450 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
4451 val |= tr32(MAC_TX_LENGTHS) &
4452 (TX_LENGTHS_JMB_FRM_LEN_MSK |
4453 TX_LENGTHS_CNT_DWN_VAL_MSK);
/* 1000/half needs the extended (0xff) slot time; otherwise 32. */
4455 if (tp->link_config.active_speed == SPEED_1000 &&
4456 tp->link_config.active_duplex == DUPLEX_HALF)
4457 tw32(MAC_TX_LENGTHS, val |
4458 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
4460 tw32(MAC_TX_LENGTHS, val |
4461 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
/* Pre-5705 chips: only collect stats while the link is up. */
4463 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
4464 if (netif_carrier_ok(tp->dev)) {
4465 tw32(HOSTCC_STAT_COAL_TICKS,
4466 tp->coal.stats_block_coalesce_usecs);
4468 tw32(HOSTCC_STAT_COAL_TICKS, 0);
/* ASPM workaround: relax the L1 entry threshold while link is down. */
4472 if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) {
4473 val = tr32(PCIE_PWR_MGMT_THRESH);
4474 if (!netif_carrier_ok(tp->dev))
4475 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
4478 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
4479 tw32(PCIE_PWR_MGMT_THRESH, val);
/* Nonzero while interrupts are being synchronized/quiesced; readers
 * use this to skip work that would race with tg3_full_lock paths. */
4485 static inline int tg3_irq_sync(struct tg3 *tp)
4487 return tp->irq_sync;
/* Copy 'len' bytes of chip registers starting at offset 'off' into the
 * dump buffer.  Note dst is first advanced by 'off' bytes so each
 * register lands at its own offset within the caller's buffer. */
4490 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
4494 dst = (u32 *)((u8 *)dst + off);
4495 for (i = 0; i < len; i += sizeof(u32))
4496 *dst++ = tr32(off + i);
/* Fill 'regs' with a register dump for non-PCI-Express chips by
 * reading each documented register window individually (legacy chips
 * cannot be dumped with one linear read).  Block ranges that depend on
 * optional features (MSI-X vectors, TX CPU, NVRAM) are dumped only
 * when the corresponding flag is set. */
4499 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
4501 tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
4502 tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
4503 tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
4504 tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
4505 tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
4506 tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
4507 tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
4508 tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
4509 tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
4510 tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
4511 tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
4512 tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
4513 tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
4514 tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
4515 tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
4516 tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
4517 tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
4518 tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
4519 tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
/* Per-vector coalescing registers exist only with MSI-X support. */
4521 if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX)
4522 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
4524 tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
4525 tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
4526 tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
4527 tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
4528 tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
4529 tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
4530 tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
4531 tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
/* The dedicated TX CPU exists only on pre-5705 chips. */
4533 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
4534 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
4535 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
4536 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
4539 tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
4540 tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
4541 tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
4542 tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
4543 tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
4545 if (tp->tg3_flags & TG3_FLAG_NVRAM)
4546 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
/* Diagnostic dump used on TX timeouts / fatal errors: prints all
 * non-zero chip registers (linear read on PCIe, per-block on legacy
 * chips) followed by the per-vector hardware status blocks and the
 * driver's NAPI ring indices.  Allocation is GFP_ATOMIC because this
 * can run from timer/softirq context.
 * NOTE(review): listing is elided — the NULL check body, kfree and
 * some braces are missing from view.
 */
4549 static void tg3_dump_state(struct tg3 *tp)
4554 regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
4556 netdev_err(tp->dev, "Failed allocating register dump buffer\n");
4560 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
4561 /* Read up to but not including private PCI registers */
4562 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
4563 regs[i / sizeof(u32)] = tr32(i);
4565 tg3_dump_legacy_regs(tp, regs);
/* Print four registers per line, skipping all-zero groups. */
4567 for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
4568 if (!regs[i + 0] && !regs[i + 1] &&
4569 !regs[i + 2] && !regs[i + 3])
4572 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
4574 regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
4579 for (i = 0; i < tp->irq_cnt; i++) {
4580 struct tg3_napi *tnapi = &tp->napi[i];
4582 /* SW status block */
4584 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
4586 tnapi->hw_status->status,
4587 tnapi->hw_status->status_tag,
4588 tnapi->hw_status->rx_jumbo_consumer,
4589 tnapi->hw_status->rx_consumer,
4590 tnapi->hw_status->rx_mini_consumer,
4591 tnapi->hw_status->idx[0].rx_producer,
4592 tnapi->hw_status->idx[0].tx_consumer);
4595 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
4597 tnapi->last_tag, tnapi->last_irq_tag,
4598 tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
4600 tnapi->prodring.rx_std_prod_idx,
4601 tnapi->prodring.rx_std_cons_idx,
4602 tnapi->prodring.rx_jmb_prod_idx,
4603 tnapi->prodring.rx_jmb_cons_idx);
4607 /* This is called whenever we suspect that the system chipset is re-
4608 * ordering the sequence of MMIO to the tx send mailbox. The symptom
4609 * is bogus tx completions. We try to recover by setting the
4610 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
/* Recovery path for suspected MMIO write reordering to the TX mailbox
 * (symptom: bogus TX completions).  Warns the user and flags the chip
 * for a deferred reset with mailbox-write-reorder handling enabled.
 * The BUG_ON asserts we are not already in reorder-safe mode. */
4613 static void tg3_tx_recover(struct tg3 *tp)
4615 BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
4616 tp->write32_tx_mbox == tg3_write_indirect_mbox);
4618 netdev_warn(tp->dev,
4619 "The system may be re-ordering memory-mapped I/O "
4620 "cycles to the network device, attempting to recover. "
4621 "Please report the problem to the driver maintainer "
4622 "and include system chipset information.\n");
/* tp->lock serializes the flag update with the reset task. */
4624 spin_lock(&tp->lock);
4625 tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
4626 spin_unlock(&tp->lock);
/* Number of free TX descriptors on this ring.  The (prod - cons) &
 * (ring_size - 1) arithmetic handles index wraparound. */
4629 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
4631 /* Tell compiler to fetch tx indices from memory. */
4633 return tnapi->tx_pending -
4634 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
4637 /* Tigon3 never reports partial packet sends. So we do not
4638 * need special logic to handle SKBs that have not had all
4639 * of their frags sent yet, like SunGEM does.
/* Reclaim completed TX descriptors for one NAPI ring: walk from the
 * software consumer index to the hardware consumer index, unmapping
 * and freeing each skb (head fragment via pci_unmap_single, page
 * fragments via pci_unmap_page), then wake the TX queue if it was
 * stopped and enough space is now available.  A NULL skb or an skb
 * straddling the hardware index indicates the MMIO-reorder bug and
 * triggers tg3_tx_recover().
 * NOTE(review): listing is elided — tx_bug handling, dev_kfree_skb
 * and some braces are missing from view.
 */
4641 static void tg3_tx(struct tg3_napi *tnapi)
4643 struct tg3 *tp = tnapi->tp;
4644 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
4645 u32 sw_idx = tnapi->tx_cons;
4646 struct netdev_queue *txq;
4647 int index = tnapi - tp->napi;
/* With TSS each NAPI vector owns its own TX queue. */
4649 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
4652 txq = netdev_get_tx_queue(tp->dev, index);
4654 while (sw_idx != hw_idx) {
4655 struct ring_info *ri = &tnapi->tx_buffers[sw_idx];
4656 struct sk_buff *skb = ri->skb;
/* NULL skb here means completions arrived out of order. */
4659 if (unlikely(skb == NULL)) {
4664 pci_unmap_single(tp->pdev,
4665 dma_unmap_addr(ri, mapping),
4671 sw_idx = NEXT_TX(sw_idx);
/* Each fragment occupies one additional descriptor slot. */
4673 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4674 ri = &tnapi->tx_buffers[sw_idx];
4675 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
4678 pci_unmap_page(tp->pdev,
4679 dma_unmap_addr(ri, mapping),
4680 skb_shinfo(skb)->frags[i].size,
4682 sw_idx = NEXT_TX(sw_idx);
4687 if (unlikely(tx_bug)) {
4693 tnapi->tx_cons = sw_idx;
4695 /* Need to make the tx_cons update visible to tg3_start_xmit()
4696 * before checking for netif_queue_stopped(). Without the
4697 * memory barrier, there is a small possibility that tg3_start_xmit()
4698 * will miss it and cause the queue to be stopped forever.
/* Re-check under the TX lock to avoid a wake/stop race with xmit. */
4702 if (unlikely(netif_tx_queue_stopped(txq) &&
4703 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
4704 __netif_tx_lock(txq, smp_processor_id());
4705 if (netif_tx_queue_stopped(txq) &&
4706 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
4707 netif_tx_wake_queue(txq);
4708 __netif_tx_unlock(txq);
/* Unmap and free one RX ring buffer; map_sz is the size originally
 * passed to pci_map_single (std vs. jumbo map size). */
4712 static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
4717 pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
4718 map_sz, PCI_DMA_FROMDEVICE);
4719 dev_kfree_skb_any(ri->skb);
4723 /* Returns size of skb allocated or < 0 on error.
4725 * We only need to fill in the address because the other members
4726 * of the RX descriptor are invariant, see tg3_init_rings.
4728 * Note the purposeful asymmetry of cpu vs. chip accesses. For
4729 * posting buffers we only dirty the first cache line of the RX
4730 * descriptor (containing the address). Whereas for the RX status
4731 * buffers the cpu only reads the last cacheline of the RX descriptor
4732 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
/* Allocate and DMA-map a fresh RX skb for one slot of the standard or
 * jumbo producer ring (selected by opaque_key), then write the bus
 * address into the ring descriptor.  On any failure the ring entry is
 * left completely untouched (callers rely on this).  Returns the skb
 * size allocated or a negative errno.
 * NOTE(review): listing is elided — the default switch case, error
 * returns and the map->skb assignment are missing from view.
 */
4734 static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
4735 u32 opaque_key, u32 dest_idx_unmasked)
4737 struct tg3_rx_buffer_desc *desc;
4738 struct ring_info *map;
4739 struct sk_buff *skb;
4741 int skb_size, dest_idx;
/* Pick descriptor/bookkeeping arrays and buffer size per ring type. */
4743 switch (opaque_key) {
4744 case RXD_OPAQUE_RING_STD:
4745 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
4746 desc = &tpr->rx_std[dest_idx];
4747 map = &tpr->rx_std_buffers[dest_idx];
4748 skb_size = tp->rx_pkt_map_sz;
4751 case RXD_OPAQUE_RING_JUMBO:
4752 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
4753 desc = &tpr->rx_jmb[dest_idx].std;
4754 map = &tpr->rx_jmb_buffers[dest_idx];
4755 skb_size = TG3_RX_JMB_MAP_SZ;
4762 /* Do not overwrite any of the map or rp information
4763 * until we are sure we can commit to a new buffer.
4765 * Callers depend upon this behavior and assume that
4766 * we leave everything unchanged if we fail.
4768 skb = netdev_alloc_skb(tp->dev, skb_size + tp->rx_offset);
/* rx_offset keeps the IP header aligned on the chip's terms. */
4772 skb_reserve(skb, tp->rx_offset);
4774 mapping = pci_map_single(tp->pdev, skb->data, skb_size,
4775 PCI_DMA_FROMDEVICE);
4776 if (pci_dma_mapping_error(tp->pdev, mapping)) {
/* Commit: record the mapping and publish the 64-bit bus address to
 * the hardware descriptor (hi/lo split). */
4782 dma_unmap_addr_set(map, mapping, mapping);
4784 desc->addr_hi = ((u64)mapping >> 32);
4785 desc->addr_lo = ((u64)mapping & 0xffffffff);
4790 /* We only need to move over in the address because the other
4791 * members of the RX descriptor are invariant. See notes above
4792 * tg3_alloc_rx_skb for full details.
4794 static void tg3_recycle_rx(struct tg3_napi *tnapi,
4795 struct tg3_rx_prodring_set *dpr,
4796 u32 opaque_key, int src_idx,
4797 u32 dest_idx_unmasked)
4799 struct tg3 *tp = tnapi->tp;
4800 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
4801 struct ring_info *src_map, *dest_map;
4802 struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
4805 switch (opaque_key) {
4806 case RXD_OPAQUE_RING_STD:
4807 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
4808 dest_desc = &dpr->rx_std[dest_idx];
4809 dest_map = &dpr->rx_std_buffers[dest_idx];
4810 src_desc = &spr->rx_std[src_idx];
4811 src_map = &spr->rx_std_buffers[src_idx];
4814 case RXD_OPAQUE_RING_JUMBO:
4815 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
4816 dest_desc = &dpr->rx_jmb[dest_idx].std;
4817 dest_map = &dpr->rx_jmb_buffers[dest_idx];
4818 src_desc = &spr->rx_jmb[src_idx].std;
4819 src_map = &spr->rx_jmb_buffers[src_idx];
4826 dest_map->skb = src_map->skb;
4827 dma_unmap_addr_set(dest_map, mapping,
4828 dma_unmap_addr(src_map, mapping));
4829 dest_desc->addr_hi = src_desc->addr_hi;
4830 dest_desc->addr_lo = src_desc->addr_lo;
4832 /* Ensure that the update to the skb happens after the physical
4833 * addresses have been transferred to the new BD location.
4837 src_map->skb = NULL;
4840 /* The RX ring scheme is composed of multiple rings which post fresh
4841 * buffers to the chip, and one special ring the chip uses to report
4842 * status back to the host.
4844 * The special ring reports the status of received packets to the
4845 * host. The chip does not write into the original descriptor the
4846 * RX buffer was obtained from. The chip simply takes the original
4847 * descriptor as provided by the host, updates the status and length
4848 * field, then writes this into the next status ring entry.
4850 * Each ring the host uses to post buffers to the chip is described
4851 * by a TG3_BDINFO entry in the chips SRAM area. When a packet arrives,
4852 * it is first placed into the on-chip ram. When the packet's length
4853 * is known, it walks down the TG3_BDINFO entries to select the ring.
4854 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
4855 * which is within the range of the new packet's length is chosen.
4857 * The "separate ring for rx status" scheme may sound queer, but it makes
4858 * sense from a cache coherency perspective. If only the host writes
4859 * to the buffer post rings, and only the chip writes to the rx status
4860 * rings, then cache lines never move beyond shared-modified state.
4861 * If both the host and chip were to write into the same ring, cache line
4862 * eviction could occur since both entities want it in an exclusive state.
/* Main NAPI RX handler for one vector: drains the return (status)
 * ring up to 'budget' packets.  For each completed descriptor it
 * either (a) allocates a replacement buffer and hands the full-size
 * skb up the stack, or (b) for small packets, copies into a fresh skb
 * and recycles the original ring buffer.  Handles checksum offload,
 * VLAN tag extraction, oversize-frame drops, and refills the std/jumbo
 * producer rings (directly, or by kicking NAPI 1 when RSS owns the
 * refill).  Returns the number of packets received.
 * NOTE(review): listing is elided — received++ accounting, drop_it
 * labels, some braces and the final return are missing from view.
 */
4864 static int tg3_rx(struct tg3_napi *tnapi, int budget)
4866 struct tg3 *tp = tnapi->tp;
4867 u32 work_mask, rx_std_posted = 0;
4868 u32 std_prod_idx, jmb_prod_idx;
4869 u32 sw_idx = tnapi->rx_rcb_ptr;
4872 struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
4874 hw_idx = *(tnapi->rx_rcb_prod_idx);
4876 * We need to order the read of hw_idx and the read of
4877 * the opaque cookie.
4882 std_prod_idx = tpr->rx_std_prod_idx;
4883 jmb_prod_idx = tpr->rx_jmb_prod_idx;
4884 while (sw_idx != hw_idx && budget > 0) {
4885 struct ring_info *ri;
4886 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
4888 struct sk_buff *skb;
4889 dma_addr_t dma_addr;
4890 u32 opaque_key, desc_idx, *post_ptr;
/* The opaque cookie encodes which producer ring (std/jumbo) the
 * buffer came from, plus its index in that ring. */
4892 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
4893 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
4894 if (opaque_key == RXD_OPAQUE_RING_STD) {
4895 ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
4896 dma_addr = dma_unmap_addr(ri, mapping);
4898 post_ptr = &std_prod_idx;
4900 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
4901 ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
4902 dma_addr = dma_unmap_addr(ri, mapping);
4904 post_ptr = &jmb_prod_idx;
4906 goto next_pkt_nopost;
4908 work_mask |= opaque_key;
/* Errored frames (except the benign odd-nibble MII case) are dropped
 * and their buffer recycled. */
4910 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
4911 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
4913 tg3_recycle_rx(tnapi, tpr, opaque_key,
4914 desc_idx, *post_ptr);
4916 /* Other statistics kept track of by card. */
4921 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
/* Large packet: replace the ring buffer and pass the original skb up. */
4924 if (len > TG3_RX_COPY_THRESH(tp)) {
4927 skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key,
4932 pci_unmap_single(tp->pdev, dma_addr, skb_size,
4933 PCI_DMA_FROMDEVICE);
4935 /* Ensure that the update to the skb happens
4936 * after the usage of the old DMA mapping.
/* Small packet: copy into a fresh skb and recycle the ring buffer. */
4944 struct sk_buff *copy_skb;
4946 tg3_recycle_rx(tnapi, tpr, opaque_key,
4947 desc_idx, *post_ptr);
4949 copy_skb = netdev_alloc_skb(tp->dev, len +
4951 if (copy_skb == NULL)
4952 goto drop_it_no_recycle;
4954 skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
4955 skb_put(copy_skb, len);
/* Sync for CPU around the copy since the mapping stays live. */
4956 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4957 skb_copy_from_linear_data(skb, copy_skb->data, len);
4958 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4960 /* We'll reuse the original ring buffer. */
/* Hardware checksum: trust it only when the TCP/UDP csum field reads
 * 0xffff (fully verified by the chip). */
4964 if ((tp->dev->features & NETIF_F_RXCSUM) &&
4965 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
4966 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
4967 >> RXD_TCPCSUM_SHIFT) == 0xffff))
4968 skb->ip_summed = CHECKSUM_UNNECESSARY;
4970 skb_checksum_none_assert(skb);
4972 skb->protocol = eth_type_trans(skb, tp->dev);
/* Drop oversize frames unless they carry a VLAN header that accounts
 * for the extra bytes. */
4974 if (len > (tp->dev->mtu + ETH_HLEN) &&
4975 skb->protocol != htons(ETH_P_8021Q)) {
4977 goto drop_it_no_recycle;
4980 if (desc->type_flags & RXD_FLAG_VLAN &&
4981 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
4982 __vlan_hwaccel_put_tag(skb,
4983 desc->err_vlan & RXD_VLAN_MASK);
4985 napi_gro_receive(&tnapi->napi, skb);
/* Periodically kick the std producer mailbox so the chip never runs
 * dry while we are still draining a large batch. */
4993 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
4994 tpr->rx_std_prod_idx = std_prod_idx &
4995 tp->rx_std_ring_mask;
4996 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
4997 tpr->rx_std_prod_idx);
4998 work_mask &= ~RXD_OPAQUE_RING_STD;
5003 sw_idx &= tp->rx_ret_ring_mask;
5005 /* Refresh hw_idx to see if there is new work */
5006 if (sw_idx == hw_idx) {
5007 hw_idx = *(tnapi->rx_rcb_prod_idx);
5012 /* ACK the status ring. */
5013 tnapi->rx_rcb_ptr = sw_idx;
5014 tw32_rx_mbox(tnapi->consmbox, sw_idx);
5016 /* Refill RX ring(s). */
5017 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS)) {
5018 if (work_mask & RXD_OPAQUE_RING_STD) {
5019 tpr->rx_std_prod_idx = std_prod_idx &
5020 tp->rx_std_ring_mask;
5021 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5022 tpr->rx_std_prod_idx);
5024 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
5025 tpr->rx_jmb_prod_idx = jmb_prod_idx &
5026 tp->rx_jmb_ring_mask;
5027 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5028 tpr->rx_jmb_prod_idx);
5031 } else if (work_mask) {
5032 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
5033 * updated before the producer indices can be updated.
5037 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
5038 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
/* RSS mode: NAPI 1 owns buffer refills; schedule it if needed. */
5040 if (tnapi != &tp->napi[1])
5041 napi_schedule(&tp->napi[1].napi);
/* Check the shared status block for a link-change event and, if one is
 * pending, clear it and re-run PHY setup under tp->lock.  Skipped
 * entirely when the driver polls link state via register or serdes
 * polling instead of status-block events. */
5047 static void tg3_poll_link(struct tg3 *tp)
5049 /* handle link change and other phy events */
5050 if (!(tp->tg3_flags &
5051 (TG3_FLAG_USE_LINKCHG_REG |
5052 TG3_FLAG_POLL_SERDES))) {
5053 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
5055 if (sblk->status & SD_STATUS_LINK_CHG) {
/* Clear the link-change bit while keeping SD_STATUS_UPDATED set. */
5056 sblk->status = SD_STATUS_UPDATED |
5057 (sblk->status & ~SD_STATUS_LINK_CHG);
5058 spin_lock(&tp->lock);
/* With phylib in charge, just ack the MAC status change bits;
 * otherwise run the full PHY setup. */
5059 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
5061 (MAC_STATUS_SYNC_CHANGED |
5062 MAC_STATUS_CFG_CHANGED |
5063 MAC_STATUS_MI_COMPLETION |
5064 MAC_STATUS_LNKSTATE_CHANGED));
5067 tg3_setup_phy(tp, 0);
5068 spin_unlock(&tp->lock);
/* Transfer recycled rx buffers from a per-vector producer ring set (spr)
 * to the hardware-owned ring set (dpr).  Used with RSS, where only
 * napi[0]'s producer rings are posted to the chip.  Copies both the
 * standard and the jumbo ring in contiguous, non-wrapping runs.
 * Returns nonzero on error (err handling partially elided in this view).
 */
5073 static int tg3_rx_prodring_xfer(struct tg3 *tp,
5074 struct tg3_rx_prodring_set *dpr,
5075 struct tg3_rx_prodring_set *spr)
5077 u32 si, di, cpycnt, src_prod_idx;
/* ---- standard ring ---- */
5081 src_prod_idx = spr->rx_std_prod_idx;
5083 /* Make sure updates to the rx_std_buffers[] entries and the
5084 * standard producer index are seen in the correct order.
5088 if (spr->rx_std_cons_idx == src_prod_idx)
/* Largest contiguous run that neither wraps the source consumer nor
 * the destination producer index. */
5091 if (spr->rx_std_cons_idx < src_prod_idx)
5092 cpycnt = src_prod_idx - spr->rx_std_cons_idx;
5094 cpycnt = tp->rx_std_ring_mask + 1 -
5095 spr->rx_std_cons_idx;
5097 cpycnt = min(cpycnt,
5098 tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
5100 si = spr->rx_std_cons_idx;
5101 di = dpr->rx_std_prod_idx;
/* Destination slots must be empty (skb == NULL) before overwriting. */
5103 for (i = di; i < di + cpycnt; i++) {
5104 if (dpr->rx_std_buffers[i].skb) {
5114 /* Ensure that updates to the rx_std_buffers ring and the
5115 * shadowed hardware producer ring from tg3_recycle_skb() are
5116 * ordered correctly WRT the skb check above.
5120 memcpy(&dpr->rx_std_buffers[di],
5121 &spr->rx_std_buffers[si],
5122 cpycnt * sizeof(struct ring_info));
/* Copy the DMA addresses into the destination descriptors. */
5124 for (i = 0; i < cpycnt; i++, di++, si++) {
5125 struct tg3_rx_buffer_desc *sbd, *dbd;
5126 sbd = &spr->rx_std[si];
5127 dbd = &dpr->rx_std[di];
5128 dbd->addr_hi = sbd->addr_hi;
5129 dbd->addr_lo = sbd->addr_lo;
5132 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
5133 tp->rx_std_ring_mask;
5134 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
5135 tp->rx_std_ring_mask;
/* ---- jumbo ring: same algorithm as above ---- */
5139 src_prod_idx = spr->rx_jmb_prod_idx;
5141 /* Make sure updates to the rx_jmb_buffers[] entries and
5142 * the jumbo producer index are seen in the correct order.
5146 if (spr->rx_jmb_cons_idx == src_prod_idx)
5149 if (spr->rx_jmb_cons_idx < src_prod_idx)
5150 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
5152 cpycnt = tp->rx_jmb_ring_mask + 1 -
5153 spr->rx_jmb_cons_idx;
5155 cpycnt = min(cpycnt,
5156 tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
5158 si = spr->rx_jmb_cons_idx;
5159 di = dpr->rx_jmb_prod_idx;
5161 for (i = di; i < di + cpycnt; i++) {
5162 if (dpr->rx_jmb_buffers[i].skb) {
5172 /* Ensure that updates to the rx_jmb_buffers ring and the
5173 * shadowed hardware producer ring from tg3_recycle_skb() are
5174 * ordered correctly WRT the skb check above.
5178 memcpy(&dpr->rx_jmb_buffers[di],
5179 &spr->rx_jmb_buffers[si],
5180 cpycnt * sizeof(struct ring_info));
5182 for (i = 0; i < cpycnt; i++, di++, si++) {
5183 struct tg3_rx_buffer_desc *sbd, *dbd;
5184 sbd = &spr->rx_jmb[si].std;
5185 dbd = &dpr->rx_jmb[di].std;
5186 dbd->addr_hi = sbd->addr_hi;
5187 dbd->addr_lo = sbd->addr_lo;
5190 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
5191 tp->rx_jmb_ring_mask;
5192 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
5193 tp->rx_jmb_ring_mask;
/* Core per-vector NAPI work: reap TX completions, receive packets within
 * the remaining budget, and -- when RSS is active and this is napi[1] --
 * funnel recycled rx buffers from all vectors back into napi[0]'s
 * hardware-owned producer rings.  Returns the updated work_done count.
 */
5199 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
5201 struct tg3 *tp = tnapi->tp;
5203 /* run TX completion thread */
5204 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
5206 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
5210 /* run RX thread, within the bounds set by NAPI.
5211 * All RX "locking" is done by ensuring outside
5212 * code synchronizes with tg3->napi.poll()
5214 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
5215 work_done += tg3_rx(tnapi, budget - work_done);
5217 if ((tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS) && tnapi == &tp->napi[1]) {
5218 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
5220 u32 std_prod_idx = dpr->rx_std_prod_idx;
5221 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
5223 for (i = 1; i < tp->irq_cnt; i++)
5224 err |= tg3_rx_prodring_xfer(tp, dpr,
5225 &tp->napi[i].prodring);
/* Ring the producer doorbells only if the indices actually moved. */
5229 if (std_prod_idx != dpr->rx_std_prod_idx)
5230 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5231 dpr->rx_std_prod_idx);
5233 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
5234 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5235 dpr->rx_jmb_prod_idx);
/* NOTE(review): error path (elided here) appears to poke the host
 * coalescing engine to force another pass -- confirm in full source.
 */
5240 tw32_f(HOSTCC_MODE, tp->coal_now);
/* NAPI poll handler for MSI-X vectors (tagged-status mode only).
 * Loops on tg3_poll_work() until the budget is hit or no work remains,
 * then re-enables the vector's interrupt via its mailbox.  On a TX
 * recovery error it completes NAPI and schedules a full chip reset.
 */
5246 static int tg3_poll_msix(struct napi_struct *napi, int budget)
5248 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
5249 struct tg3 *tp = tnapi->tp;
5251 struct tg3_hw_status *sblk = tnapi->hw_status;
5254 work_done = tg3_poll_work(tnapi, work_done, budget);
5256 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
5259 if (unlikely(work_done >= budget))
5262 /* tp->last_tag is used in tg3_int_reenable() below
5263 * to tell the hw how much work has been processed,
5264 * so we must read it before checking for more work.
5266 tnapi->last_tag = sblk->status_tag;
5267 tnapi->last_irq_tag = tnapi->last_tag;
5270 /* check for RX/TX work to do */
5271 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
5272 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
5273 napi_complete(napi);
5274 /* Reenable interrupts. */
5275 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
/* TX recovery error path: */
5284 /* work_done is guaranteed to be less than budget. */
5285 napi_complete(napi);
5286 schedule_work(&tp->reset_task);
/* Inspect the chip's error status registers (flow attention, MSI status,
 * RDMA/WDMA status) when the status block reports SD_STATUS_ERROR, and
 * schedule a chip reset if a real error is found.  Runs at most once
 * per error episode (gated by TG3_FLAG_ERROR_PROCESSED).
 */
5290 static void tg3_process_error(struct tg3 *tp)
5293 bool real_error = false;
5295 if (tp->tg3_flags & TG3_FLAG_ERROR_PROCESSED)
5298 /* Check Flow Attention register */
5299 val = tr32(HOSTCC_FLOW_ATTN);
/* MBUF low-watermark attention alone is not treated as fatal. */
5300 if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
5301 netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
5305 if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
5306 netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
5310 if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
5311 netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
5320 tp->tg3_flags |= TG3_FLAG_ERROR_PROCESSED;
5321 schedule_work(&tp->reset_task);
/* Main NAPI poll handler (vector 0 / non-MSI-X case).  Checks for chip
 * errors, handles link changes, then loops on tg3_poll_work() until no
 * work remains or the budget is consumed, finally re-enabling
 * interrupts.  Supports both tagged and untagged status modes.
 */
5324 static int tg3_poll(struct napi_struct *napi, int budget)
5326 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
5327 struct tg3 *tp = tnapi->tp;
5329 struct tg3_hw_status *sblk = tnapi->hw_status;
5332 if (sblk->status & SD_STATUS_ERROR)
5333 tg3_process_error(tp);
5337 work_done = tg3_poll_work(tnapi, work_done, budget);
5339 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
5342 if (unlikely(work_done >= budget))
5345 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
5346 /* tp->last_tag is used in tg3_int_reenable() below
5347 * to tell the hw how much work has been processed,
5348 * so we must read it before checking for more work.
5350 tnapi->last_tag = sblk->status_tag;
5351 tnapi->last_irq_tag = tnapi->last_tag;
/* Untagged mode: ack the status block by clearing UPDATED. */
5354 sblk->status &= ~SD_STATUS_UPDATED;
5356 if (likely(!tg3_has_work(tnapi))) {
5357 napi_complete(napi);
5358 tg3_int_reenable(tnapi);
/* TX recovery error path: */
5366 /* work_done is guaranteed to be less than budget. */
5367 napi_complete(napi);
5368 schedule_work(&tp->reset_task);
/* Disable NAPI on every interrupt vector, highest index first. */
5372 static void tg3_napi_disable(struct tg3 *tp)
5376 for (i = tp->irq_cnt - 1; i >= 0; i--)
5377 napi_disable(&tp->napi[i].napi);
/* Enable NAPI on every interrupt vector, lowest index first. */
5380 static void tg3_napi_enable(struct tg3 *tp)
5384 for (i = 0; i < tp->irq_cnt; i++)
5385 napi_enable(&tp->napi[i].napi);
/* Register NAPI contexts: vector 0 uses tg3_poll, all additional
 * (MSI-X) vectors use tg3_poll_msix.  Weight is the default 64.
 */
5388 static void tg3_napi_init(struct tg3 *tp)
5392 netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
5393 for (i = 1; i < tp->irq_cnt; i++)
5394 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
/* Unregister all NAPI contexts added by tg3_napi_init(). */
5397 static void tg3_napi_fini(struct tg3 *tp)
5401 for (i = 0; i < tp->irq_cnt; i++)
5402 netif_napi_del(&tp->napi[i].napi);
/* Stop all packet processing: refresh trans_start so the watchdog does
 * not fire during the stop, then disable NAPI and the TX queues.
 */
5405 static inline void tg3_netif_stop(struct tg3 *tp)
5407 tp->dev->trans_start = jiffies; /* prevent tx timeout */
5408 tg3_napi_disable(tp);
5409 netif_tx_disable(tp->dev);
/* Restart packet processing after tg3_netif_stop(): wake TX queues,
 * re-enable NAPI, mark the status block dirty so the first poll runs,
 * and unmask chip interrupts.
 */
5412 static inline void tg3_netif_start(struct tg3 *tp)
5414 /* NOTE: unconditional netif_tx_wake_all_queues is only
5415 * appropriate so long as all callers are assured to
5416 * have free tx slots (such as after tg3_init_hw)
5418 netif_tx_wake_all_queues(tp->dev);
5420 tg3_napi_enable(tp);
5421 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
5422 tg3_enable_ints(tp);
/* Quiesce interrupt handling: waits for any in-flight handler on every
 * vector to finish.  Must not already be in the irq_sync state.
 * NOTE(review): irq_sync is presumably set between the BUG_ON and the
 * synchronize loop (line elided in this view) -- confirm in full source.
 */
5425 static void tg3_irq_quiesce(struct tg3 *tp)
5429 BUG_ON(tp->irq_sync);
5434 for (i = 0; i < tp->irq_cnt; i++)
5435 synchronize_irq(tp->napi[i].irq_vec);
5438 /* Fully shutdown all tg3 driver activity elsewhere in the system.
5439 * If irq_sync is non-zero, then the IRQ handler must be synchronized
5440 * with as well. Most of the time, this is not necessary except when
5441 * shutting down the device.
5443 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
5445 spin_lock_bh(&tp->lock);
/* Optionally wait out in-flight IRQ handlers too. */
5447 tg3_irq_quiesce(tp);
/* Release the device lock taken by tg3_full_lock(). */
5450 static inline void tg3_full_unlock(struct tg3 *tp)
5452 spin_unlock_bh(&tp->lock);
5455 /* One-shot MSI handler - Chip automatically disables interrupt
5456 * after sending MSI so driver doesn't have to do it.
5458 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
5460 struct tg3_napi *tnapi = dev_id;
5461 struct tg3 *tp = tnapi->tp;
/* Prefetch the status block and next RX completion descriptor to warm
 * the cache before the NAPI poll runs. */
5463 prefetch(tnapi->hw_status);
5465 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
/* Do not schedule NAPI while the driver is quiescing interrupts. */
5467 if (likely(!tg3_irq_sync(tp)))
5468 napi_schedule(&tnapi->napi);
5473 /* MSI ISR - No need to check for interrupt sharing and no need to
5474 * flush status block and interrupt mailbox. PCI ordering rules
5475 * guarantee that MSI will arrive after the status block.
5477 static irqreturn_t tg3_msi(int irq, void *dev_id)
5479 struct tg3_napi *tnapi = dev_id;
5480 struct tg3 *tp = tnapi->tp;
5482 prefetch(tnapi->hw_status);
5484 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5486 * Writing any value to intr-mbox-0 clears PCI INTA# and
5487 * chip-internal interrupt pending events.
5488 * Writing non-zero to intr-mbox-0 additional tells the
5489 * NIC to stop sending us irqs, engaging "in-intr-handler"
/* Mask further interrupts until NAPI re-enables them. */
5492 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5493 if (likely(!tg3_irq_sync(tp)))
5494 napi_schedule(&tnapi->napi);
/* MSI is never shared, so always report handled. */
5496 return IRQ_RETVAL(1);
/* Legacy INTx interrupt handler (untagged status mode).  Must determine
 * whether the (possibly shared) interrupt is ours, mask further chip
 * interrupts via the mailbox, and hand off to NAPI if there is work.
 */
5499 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
5501 struct tg3_napi *tnapi = dev_id;
5502 struct tg3 *tp = tnapi->tp;
5503 struct tg3_hw_status *sblk = tnapi->hw_status;
5504 unsigned int handled = 1;
5506 /* In INTx mode, it is possible for the interrupt to arrive at
5507 * the CPU before the status block posted prior to the interrupt.
5508 * Reading the PCI State register will confirm whether the
5509 * interrupt is ours and will flush the status block.
5511 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
5512 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
5513 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5520 * Writing any value to intr-mbox-0 clears PCI INTA# and
5521 * chip-internal interrupt pending events.
5522 * Writing non-zero to intr-mbox-0 additional tells the
5523 * NIC to stop sending us irqs, engaging "in-intr-handler"
5526 * Flush the mailbox to de-assert the IRQ immediately to prevent
5527 * spurious interrupts. The flush impacts performance but
5528 * excessive spurious interrupts can be worse in some cases.
5530 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5531 if (tg3_irq_sync(tp))
/* Ack the status block before scheduling the poll. */
5533 sblk->status &= ~SD_STATUS_UPDATED;
5534 if (likely(tg3_has_work(tnapi))) {
5535 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5536 napi_schedule(&tnapi->napi);
5538 /* No work, shared interrupt perhaps? re-enable
5539 * interrupts, and flush that PCI write
5541 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
5545 return IRQ_RETVAL(handled);
/* Legacy INTx interrupt handler for tagged-status mode.  A repeated
 * status tag means the interrupt is not ours (or is a screaming shared
 * interrupt); otherwise mask chip interrupts and schedule NAPI.
 */
5548 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
5550 struct tg3_napi *tnapi = dev_id;
5551 struct tg3 *tp = tnapi->tp;
5552 struct tg3_hw_status *sblk = tnapi->hw_status;
5553 unsigned int handled = 1;
5555 /* In INTx mode, it is possible for the interrupt to arrive at
5556 * the CPU before the status block posted prior to the interrupt.
5557 * Reading the PCI State register will confirm whether the
5558 * interrupt is ours and will flush the status block.
5560 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
5561 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
5562 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5569 * writing any value to intr-mbox-0 clears PCI INTA# and
5570 * chip-internal interrupt pending events.
5571 * writing non-zero to intr-mbox-0 additional tells the
5572 * NIC to stop sending us irqs, engaging "in-intr-handler"
5575 * Flush the mailbox to de-assert the IRQ immediately to prevent
5576 * spurious interrupts. The flush impacts performance but
5577 * excessive spurious interrupts can be worse in some cases.
5579 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5582 * In a shared interrupt configuration, sometimes other devices'
5583 * interrupts will scream. We record the current status tag here
5584 * so that the above check can report that the screaming interrupts
5585 * are unhandled. Eventually they will be silenced.
5587 tnapi->last_irq_tag = sblk->status_tag;
5589 if (tg3_irq_sync(tp))
5592 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5594 napi_schedule(&tnapi->napi);
5597 return IRQ_RETVAL(handled);
5600 /* ISR for interrupt test */
/* Used by the self-test path: confirms delivery by checking either the
 * status block UPDATED bit or the PCI state register, then disables
 * chip interrupts so the test sees exactly one interrupt.
 */
5601 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
5603 struct tg3_napi *tnapi = dev_id;
5604 struct tg3 *tp = tnapi->tp;
5605 struct tg3_hw_status *sblk = tnapi->hw_status;
5607 if ((sblk->status & SD_STATUS_UPDATED) ||
5608 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5609 tg3_disable_ints(tp);
5610 return IRQ_RETVAL(1);
5612 return IRQ_RETVAL(0);
5615 static int tg3_init_hw(struct tg3 *, int);
5616 static int tg3_halt(struct tg3 *, int, int);
5618 /* Restart hardware after configuration changes, self-test, etc.
5619 * Invoked with tp->lock held.
/* Returns 0 on success.  On init failure the device is halted, NAPI is
 * re-enabled and the timer stopped; tp->lock is dropped and re-taken
 * around that cleanup (hence the sparse annotations below).
 */
5621 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
5622 __releases(tp->lock)
5623 __acquires(tp->lock)
5627 err = tg3_init_hw(tp, reset_phy);
5630 "Failed to re-initialize device, aborting\n");
5631 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
5632 tg3_full_unlock(tp);
5633 del_timer_sync(&tp->timer);
5635 tg3_napi_enable(tp);
5637 tg3_full_lock(tp, 0);
5642 #ifdef CONFIG_NET_POLL_CONTROLLER
/* Netpoll hook: manually invoke the INTx handler on every vector so
 * netconsole/kgdboe can make progress with interrupts disabled.
 */
5643 static void tg3_poll_controller(struct net_device *dev)
5646 struct tg3 *tp = netdev_priv(dev);
5648 for (i = 0; i < tp->irq_cnt; i++)
5649 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
/* Workqueue handler that performs a full chip reset outside interrupt
 * context (scheduled from the TX timeout, error, and NAPI recovery
 * paths).  Halts and re-initializes the hardware under tg3_full_lock.
 */
5653 static void tg3_reset_task(struct work_struct *work)
5655 struct tg3 *tp = container_of(work, struct tg3, reset_task);
5657 unsigned int restart_timer;
5659 tg3_full_lock(tp, 0);
5661 if (!netif_running(tp->dev)) {
5662 tg3_full_unlock(tp);
5666 tg3_full_unlock(tp);
/* Re-acquire with irq_sync=1 so in-flight handlers are drained. */
5672 tg3_full_lock(tp, 1);
5674 restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
5675 tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
5677 if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
/* TX recovery: fall back to flushed mailbox writes. */
5678 tp->write32_tx_mbox = tg3_write32_tx_mbox;
5679 tp->write32_rx_mbox = tg3_write_flush_reg32;
5680 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
5681 tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
5684 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
5685 err = tg3_init_hw(tp, 1);
5689 tg3_netif_start(tp);
5692 mod_timer(&tp->timer, jiffies + 1);
5695 tg3_full_unlock(tp);
/* netdev watchdog callback: log the stall (if TX-error messages are
 * enabled) and schedule a full chip reset from process context.
 */
5701 static void tg3_tx_timeout(struct net_device *dev)
5703 struct tg3 *tp = netdev_priv(dev);
5705 if (netif_msg_tx_err(tp)) {
5706 netdev_err(dev, "transmit timed out, resetting\n");
5710 schedule_work(&tp->reset_task);
5713 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
5714 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
/* Wrap-around of base+len+8 in 32 bits means the buffer straddles a
 * 4GB boundary.  The base > 0xffffdcc0 pre-check cheaply skips any
 * mapping more than 0x2340 bytes below a boundary, which cannot wrap
 * for such lengths.
 */
5716 u32 base = (u32) mapping & 0xffffffff;
5718 return (base > 0xffffdcc0) && (base + len + 8 < base);
5721 /* Test for DMA addresses > 40-bit */
/* Only relevant on 64-bit highmem configs for chips with the 40-bit
 * DMA bug; elsewhere the elided fallback presumably returns 0 --
 * confirm against full source.
 */
5722 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
5725 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
5726 if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
5727 return ((u64) mapping + len) > DMA_BIT_MASK(40);
5734 static void tg3_set_txd(struct tg3_napi *, int, dma_addr_t, int, u32, u32);
5736 /* Workaround 4GB and 40-bit hardware DMA bugs. */
/* Linearizes the skb into a freshly allocated copy (with 4-byte
 * alignment headroom on 5701), maps it, re-posts it as a single TX
 * descriptor, and unwinds the DMA mappings of the original fragments.
 * Returns 0 on success; on allocation/mapping failure the packet is
 * dropped (ret set in elided lines -- confirm against full source).
 */
5737 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
5738 struct sk_buff *skb, u32 last_plus_one,
5739 u32 *start, u32 base_flags, u32 mss)
5741 struct tg3 *tp = tnapi->tp;
5742 struct sk_buff *new_skb;
5743 dma_addr_t new_addr = 0;
5747 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
5748 new_skb = skb_copy(skb, GFP_ATOMIC);
/* 5701 requires the payload start to be 4-byte aligned. */
5750 int more_headroom = 4 - ((unsigned long)skb->data & 3);
5752 new_skb = skb_copy_expand(skb,
5753 skb_headroom(skb) + more_headroom,
5754 skb_tailroom(skb), GFP_ATOMIC);
5760 /* New SKB is guaranteed to be linear. */
5762 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
5764 /* Make sure the mapping succeeded */
5765 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
5767 dev_kfree_skb(new_skb);
5770 /* Make sure new skb does not cross any 4G boundaries.
5771 * Drop the packet if it does.
5773 } else if ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) &&
5774 tg3_4g_overflow_test(new_addr, new_skb->len)) {
5775 pci_unmap_single(tp->pdev, new_addr, new_skb->len,
5778 dev_kfree_skb(new_skb);
/* Re-post the linearized copy as one descriptor. */
5781 tg3_set_txd(tnapi, entry, new_addr, new_skb->len,
5782 base_flags, 1 | (mss << 1));
5783 *start = NEXT_TX(entry);
5787 /* Now clean up the sw ring entries. */
5789 while (entry != last_plus_one) {
5793 len = skb_headlen(skb);
5795 len = skb_shinfo(skb)->frags[i-1].size;
5797 pci_unmap_single(tp->pdev,
5798 dma_unmap_addr(&tnapi->tx_buffers[entry],
5800 len, PCI_DMA_TODEVICE);
/* First slot takes ownership of the new skb; the rest are cleared. */
5802 tnapi->tx_buffers[entry].skb = new_skb;
5803 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
5806 tnapi->tx_buffers[entry].skb = NULL;
5808 entry = NEXT_TX(entry);
/* Fill one TX BD.  mss_and_is_end packs the "last fragment" flag in
 * bit 0 and the MSS in the remaining bits; the MSS and any VLAN tag are
 * combined into the descriptor's vlan_tag field.
 */
5817 static void tg3_set_txd(struct tg3_napi *tnapi, int entry,
5818 dma_addr_t mapping, int len, u32 flags,
5821 struct tg3_tx_buffer_desc *txd = &tnapi->tx_ring[entry];
5822 int is_end = (mss_and_is_end & 0x1);
5823 u32 mss = (mss_and_is_end >> 1);
5827 flags |= TXD_FLAG_END;
5828 if (flags & TXD_FLAG_VLAN) {
5829 vlan_tag = flags >> 16;
5832 vlan_tag |= (mss << TXD_MSS_SHIFT);
/* Write the descriptor: 64-bit DMA address split across two words. */
5834 txd->addr_hi = ((u64) mapping >> 32);
5835 txd->addr_lo = ((u64) mapping & 0xffffffff);
5836 txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
5837 txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
5840 /* hard_start_xmit for devices that don't have any bugs and
5841 * support TG3_FLG2_HW_TSO_2 and TG3_FLG2_HW_TSO_3 only.
/* Fast-path transmit: maps the linear head and each page fragment,
 * writes one TX BD per piece, then rings the producer mailbox.  On any
 * mapping failure the already-mapped pieces are unwound (dma_error
 * label, partially elided here) and the skb is dropped.
 */
5843 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
5844 struct net_device *dev)
5846 struct tg3 *tp = netdev_priv(dev);
5847 u32 len, entry, base_flags, mss;
5849 struct tg3_napi *tnapi;
5850 struct netdev_queue *txq;
5851 unsigned int i, last;
5853 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
5854 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
5855 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
5858 /* We are running in BH disabled context with netif_tx_lock
5859 * and TX reclaim runs via tp->napi.poll inside of a software
5860 * interrupt. Furthermore, IRQ processing runs lockless so we have
5861 * no IRQ context deadlocks to worry about either. Rejoice!
5863 if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) {
5864 if (!netif_tx_queue_stopped(txq)) {
5865 netif_tx_stop_queue(txq);
5867 /* This is a hard error, log it. */
5869 "BUG! Tx Ring full when queue awake!\n");
5871 return NETDEV_TX_BUSY;
5874 entry = tnapi->tx_prod;
5876 mss = skb_shinfo(skb)->gso_size;
/* TSO setup: compute the header length and program it into the
 * mss/base_flags fields the hardware expects. */
5878 int tcp_opt_len, ip_tcp_len;
5881 if (skb_header_cloned(skb) &&
5882 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
5887 if (skb_is_gso_v6(skb)) {
5888 hdrlen = skb_headlen(skb) - ETH_HLEN;
5890 struct iphdr *iph = ip_hdr(skb);
5892 tcp_opt_len = tcp_optlen(skb);
5893 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
5896 iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
5897 hdrlen = ip_tcp_len + tcp_opt_len;
5900 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) {
/* HW_TSO_3 encodes the header length across mss and base_flags. */
5901 mss |= (hdrlen & 0xc) << 12;
5903 base_flags |= 0x00000010;
5904 base_flags |= (hdrlen & 0x3e0) << 5;
5908 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
5909 TXD_FLAG_CPU_POST_DMA);
5911 tcp_hdr(skb)->check = 0;
5913 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
5914 base_flags |= TXD_FLAG_TCPUDP_CSUM;
5917 if (vlan_tx_tag_present(skb))
5918 base_flags |= (TXD_FLAG_VLAN |
5919 (vlan_tx_tag_get(skb) << 16));
5921 len = skb_headlen(skb);
5923 /* Queue skb data, a.k.a. the main skb fragment. */
5924 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
5925 if (pci_dma_mapping_error(tp->pdev, mapping)) {
5930 tnapi->tx_buffers[entry].skb = skb;
5931 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
5933 if ((tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) &&
5934 !mss && skb->len > VLAN_ETH_FRAME_LEN)
5935 base_flags |= TXD_FLAG_JMB_PKT;
5937 tg3_set_txd(tnapi, entry, mapping, len, base_flags,
5938 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
5940 entry = NEXT_TX(entry);
5942 /* Now loop through additional data fragments, and queue them. */
5943 if (skb_shinfo(skb)->nr_frags > 0) {
5944 last = skb_shinfo(skb)->nr_frags - 1;
5945 for (i = 0; i <= last; i++) {
5946 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5949 mapping = pci_map_page(tp->pdev,
5952 len, PCI_DMA_TODEVICE);
5953 if (pci_dma_mapping_error(tp->pdev, mapping))
5956 tnapi->tx_buffers[entry].skb = NULL;
5957 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
5960 tg3_set_txd(tnapi, entry, mapping, len,
5961 base_flags, (i == last) | (mss << 1));
5963 entry = NEXT_TX(entry);
5967 /* Packets are ready, update Tx producer idx local and on card. */
5968 tw32_tx_mbox(tnapi->prodmbox, entry);
5970 tnapi->tx_prod = entry;
5971 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
5972 netif_tx_stop_queue(txq);
5974 /* netif_tx_stop_queue() must be done before checking
5975 * checking tx index in tg3_tx_avail() below, because in
5976 * tg3_tx(), we update tx index before checking for
5977 * netif_tx_queue_stopped().
5980 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
5981 netif_tx_wake_queue(txq);
5987 return NETDEV_TX_OK;
/* dma_error unwind: unmap the head and any fragments mapped so far. */
5991 entry = tnapi->tx_prod;
5992 tnapi->tx_buffers[entry].skb = NULL;
5993 pci_unmap_single(tp->pdev,
5994 dma_unmap_addr(&tnapi->tx_buffers[entry], mapping),
5997 for (i = 0; i <= last; i++) {
5998 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5999 entry = NEXT_TX(entry);
6001 pci_unmap_page(tp->pdev,
6002 dma_unmap_addr(&tnapi->tx_buffers[entry],
6004 frag->size, PCI_DMA_TODEVICE);
6008 return NETDEV_TX_OK;
6011 static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *,
6012 struct net_device *);
6014 /* Use GSO to workaround a rare TSO bug that may be triggered when the
6015 * TSO header is greater than 80 bytes.
/* Software-segments the GSO skb and re-submits each segment through
 * tg3_start_xmit_dma_bug().  Requires roughly 3 descriptors per
 * segment; returns NETDEV_TX_BUSY if the ring cannot hold that.
 */
6017 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
6019 struct sk_buff *segs, *nskb;
6020 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
6022 /* Estimate the number of fragments in the worst case */
6023 if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
6024 netif_stop_queue(tp->dev);
6026 /* netif_tx_stop_queue() must be done before checking
6027 * checking tx index in tg3_tx_avail() below, because in
6028 * tg3_tx(), we update tx index before checking for
6029 * netif_tx_queue_stopped().
6032 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
6033 return NETDEV_TX_BUSY;
6035 netif_wake_queue(tp->dev);
/* Segment with TSO disabled so each piece fits a single MSS. */
6038 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
6040 goto tg3_tso_bug_end;
6046 tg3_start_xmit_dma_bug(nskb, tp->dev);
6052 return NETDEV_TX_OK;
6055 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
6056 * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
/* Slow-path transmit: like tg3_start_xmit(), but additionally checks
 * every mapped piece against the chip's DMA errata (short-DMA, 4GB
 * boundary, 40-bit address, 5701 bug).  If any piece would trip a bug,
 * the whole packet is linearized and re-posted through
 * tigon3_dma_hwbug_workaround().
 */
6058 static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
6059 struct net_device *dev)
6061 struct tg3 *tp = netdev_priv(dev);
6062 u32 len, entry, base_flags, mss;
6063 int would_hit_hwbug;
6065 struct tg3_napi *tnapi;
6066 struct netdev_queue *txq;
6067 unsigned int i, last;
6069 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
6070 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
6071 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
6074 /* We are running in BH disabled context with netif_tx_lock
6075 * and TX reclaim runs via tp->napi.poll inside of a software
6076 * interrupt. Furthermore, IRQ processing runs lockless so we have
6077 * no IRQ context deadlocks to worry about either. Rejoice!
6079 if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) {
6080 if (!netif_tx_queue_stopped(txq)) {
6081 netif_tx_stop_queue(txq);
6083 /* This is a hard error, log it. */
6085 "BUG! Tx Ring full when queue awake!\n");
6087 return NETDEV_TX_BUSY;
6090 entry = tnapi->tx_prod;
6092 if (skb->ip_summed == CHECKSUM_PARTIAL)
6093 base_flags |= TXD_FLAG_TCPUDP_CSUM;
6095 mss = skb_shinfo(skb)->gso_size;
/* TSO setup for the various hardware/firmware TSO generations. */
6098 u32 tcp_opt_len, hdr_len;
6100 if (skb_header_cloned(skb) &&
6101 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
6107 tcp_opt_len = tcp_optlen(skb);
6109 if (skb_is_gso_v6(skb)) {
6110 hdr_len = skb_headlen(skb) - ETH_HLEN;
6114 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
6115 hdr_len = ip_tcp_len + tcp_opt_len;
6118 iph->tot_len = htons(mss + hdr_len);
/* Headers over 80 bytes trip a TSO chip bug; fall back to GSO. */
6121 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
6122 (tp->tg3_flags2 & TG3_FLG2_TSO_BUG))
6123 return tg3_tso_bug(tp, skb);
6125 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
6126 TXD_FLAG_CPU_POST_DMA);
6128 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
6129 tcp_hdr(skb)->check = 0;
6130 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
/* Firmware TSO needs a pseudo-header checksum seeded here. */
6132 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
6137 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) {
6138 mss |= (hdr_len & 0xc) << 12;
6140 base_flags |= 0x00000010;
6141 base_flags |= (hdr_len & 0x3e0) << 5;
6142 } else if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2)
6143 mss |= hdr_len << 9;
6144 else if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_1) ||
6145 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6146 if (tcp_opt_len || iph->ihl > 5) {
6149 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
6150 mss |= (tsflags << 11);
6153 if (tcp_opt_len || iph->ihl > 5) {
6156 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
6157 base_flags |= tsflags << 12;
6162 if (vlan_tx_tag_present(skb))
6163 base_flags |= (TXD_FLAG_VLAN |
6164 (vlan_tx_tag_get(skb) << 16));
6166 if ((tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) &&
6167 !mss && skb->len > VLAN_ETH_FRAME_LEN)
6168 base_flags |= TXD_FLAG_JMB_PKT;
6170 len = skb_headlen(skb);
6172 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
6173 if (pci_dma_mapping_error(tp->pdev, mapping)) {
6178 tnapi->tx_buffers[entry].skb = skb;
6179 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
6181 would_hit_hwbug = 0;
/* DMA errata checks for the linear head. */
6183 if ((tp->tg3_flags3 & TG3_FLG3_SHORT_DMA_BUG) && len <= 8)
6184 would_hit_hwbug = 1;
6186 if ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) &&
6187 tg3_4g_overflow_test(mapping, len))
6188 would_hit_hwbug = 1;
6190 if ((tp->tg3_flags3 & TG3_FLG3_40BIT_DMA_LIMIT_BUG) &&
6191 tg3_40bit_overflow_test(tp, mapping, len))
6192 would_hit_hwbug = 1;
6194 if (tp->tg3_flags3 & TG3_FLG3_5701_DMA_BUG)
6195 would_hit_hwbug = 1;
6197 tg3_set_txd(tnapi, entry, mapping, len, base_flags,
6198 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
6200 entry = NEXT_TX(entry);
6202 /* Now loop through additional data fragments, and queue them. */
6203 if (skb_shinfo(skb)->nr_frags > 0) {
6204 last = skb_shinfo(skb)->nr_frags - 1;
6205 for (i = 0; i <= last; i++) {
6206 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6209 mapping = pci_map_page(tp->pdev,
6212 len, PCI_DMA_TODEVICE);
6214 tnapi->tx_buffers[entry].skb = NULL;
6215 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
6217 if (pci_dma_mapping_error(tp->pdev, mapping))
/* Same errata checks for each page fragment. */
6220 if ((tp->tg3_flags3 & TG3_FLG3_SHORT_DMA_BUG) &&
6222 would_hit_hwbug = 1;
6224 if ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) &&
6225 tg3_4g_overflow_test(mapping, len))
6226 would_hit_hwbug = 1;
6228 if ((tp->tg3_flags3 & TG3_FLG3_40BIT_DMA_LIMIT_BUG) &&
6229 tg3_40bit_overflow_test(tp, mapping, len))
6230 would_hit_hwbug = 1;
6232 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6233 tg3_set_txd(tnapi, entry, mapping, len,
6234 base_flags, (i == last)|(mss << 1));
6236 tg3_set_txd(tnapi, entry, mapping, len,
6237 base_flags, (i == last));
6239 entry = NEXT_TX(entry);
6243 if (would_hit_hwbug) {
6244 u32 last_plus_one = entry;
6247 start = entry - 1 - skb_shinfo(skb)->nr_frags;
6248 start &= (TG3_TX_RING_SIZE - 1);
6250 /* If the workaround fails due to memory/mapping
6251 * failure, silently drop this packet.
6253 if (tigon3_dma_hwbug_workaround(tnapi, skb, last_plus_one,
6254 &start, base_flags, mss))
6260 /* Packets are ready, update Tx producer idx local and on card. */
6261 tw32_tx_mbox(tnapi->prodmbox, entry);
6263 tnapi->tx_prod = entry;
6264 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
6265 netif_tx_stop_queue(txq);
6267 /* netif_tx_stop_queue() must be done before checking
6268 * checking tx index in tg3_tx_avail() below, because in
6269 * tg3_tx(), we update tx index before checking for
6270 * netif_tx_queue_stopped().
6273 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
6274 netif_tx_wake_queue(txq);
6280 return NETDEV_TX_OK;
/* dma_error unwind: unmap the head and any fragments mapped so far. */
6284 entry = tnapi->tx_prod;
6285 tnapi->tx_buffers[entry].skb = NULL;
6286 pci_unmap_single(tp->pdev,
6287 dma_unmap_addr(&tnapi->tx_buffers[entry], mapping),
6290 for (i = 0; i <= last; i++) {
6291 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6292 entry = NEXT_TX(entry);
6294 pci_unmap_page(tp->pdev,
6295 dma_unmap_addr(&tnapi->tx_buffers[entry],
6297 frag->size, PCI_DMA_TODEVICE);
6301 return NETDEV_TX_OK;
/* ndo_fix_features: 5780-class chips cannot do TSO with jumbo MTU, so
 * strip all TSO feature bits in that configuration.
 */
6304 static u32 tg3_fix_features(struct net_device *dev, u32 features)
6306 struct tg3 *tp = netdev_priv(dev);
6308 if (dev->mtu > ETH_DATA_LEN && (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
6309 features &= ~NETIF_F_ALL_TSO;
/* Record the new MTU and adjust the related flags: jumbo frames enable
 * the jumbo producer ring (non-5780 class) or trade away TSO
 * capability (5780 class, which re-runs feature negotiation).
 */
6314 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
6319 if (new_mtu > ETH_DATA_LEN) {
6320 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
6321 netdev_update_features(dev);
6322 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
6324 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
6327 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
6328 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
6329 netdev_update_features(dev);
6331 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
/* ndo_change_mtu: validate the range; if the interface is down just
 * record the MTU, otherwise halt the chip, apply the MTU, and restart
 * the hardware under the full lock.
 */
6335 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
6337 struct tg3 *tp = netdev_priv(dev);
6340 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
6343 if (!netif_running(dev)) {
6344 /* We'll just catch it later when the
6347 tg3_set_mtu(dev, tp, new_mtu);
6355 tg3_full_lock(tp, 1);
6357 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6359 tg3_set_mtu(dev, tp, new_mtu);
6361 err = tg3_restart_hw(tp, 0);
6364 tg3_netif_start(tp);
6366 tg3_full_unlock(tp);
/* Free all rx buffers held by a producer ring set.  For secondary
 * (per-vector RSS) ring sets only the occupied cons..prod window is
 * walked; for napi[0]'s hardware-owned set every slot is freed.
 */
6374 static void tg3_rx_prodring_free(struct tg3 *tp,
6375 struct tg3_rx_prodring_set *tpr)
6379 if (tpr != &tp->napi[0].prodring) {
6380 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
6381 i = (i + 1) & tp->rx_std_ring_mask)
6382 tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
6385 if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
6386 for (i = tpr->rx_jmb_cons_idx;
6387 i != tpr->rx_jmb_prod_idx;
6388 i = (i + 1) & tp->rx_jmb_ring_mask) {
6389 tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
/* Primary ring set: free every slot unconditionally. */
6397 for (i = 0; i <= tp->rx_std_ring_mask; i++)
6398 tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
6401 if ((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) &&
6402 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
6403 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
6404 tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
6409 /* Initialize rx rings for packet processing.
6411 * The chip has been shut down and the driver detached from
6412 * the networking, so no interrupts or new tx packets will
6413 * end up in the driver. tp->{tx,}lock are held and thus
/* Returns 0 on success; on allocation failure the partially filled
 * rings are freed (error path partially elided in this view).
 */
6416 static int tg3_rx_prodring_alloc(struct tg3 *tp,
6417 struct tg3_rx_prodring_set *tpr)
6419 u32 i, rx_pkt_dma_sz;
6421 tpr->rx_std_cons_idx = 0;
6422 tpr->rx_std_prod_idx = 0;
6423 tpr->rx_jmb_cons_idx = 0;
6424 tpr->rx_jmb_prod_idx = 0;
/* Secondary (RSS) ring sets carry no descriptors of their own; just
 * clear their buffer shadow arrays. */
6426 if (tpr != &tp->napi[0].prodring) {
6427 memset(&tpr->rx_std_buffers[0], 0,
6428 TG3_RX_STD_BUFF_RING_SIZE(tp));
6429 if (tpr->rx_jmb_buffers)
6430 memset(&tpr->rx_jmb_buffers[0], 0,
6431 TG3_RX_JMB_BUFF_RING_SIZE(tp));
6435 /* Zero out all descriptors. */
6436 memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
6438 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
6439 if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
6440 tp->dev->mtu > ETH_DATA_LEN)
6441 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
6442 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
6444 /* Initialize invariants of the rings, we only set this
6445 * stuff once. This works because the card does not
6446 * write into the rx buffer posting rings.
6448 for (i = 0; i <= tp->rx_std_ring_mask; i++) {
6449 struct tg3_rx_buffer_desc *rxd;
6451 rxd = &tpr->rx_std[i];
6452 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
6453 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
6454 rxd->opaque = (RXD_OPAQUE_RING_STD |
6455 (i << RXD_OPAQUE_INDEX_SHIFT));
6458 /* Now allocate fresh SKBs for each rx ring. */
6459 for (i = 0; i < tp->rx_pending; i++) {
6460 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
6461 netdev_warn(tp->dev,
6462 "Using a smaller RX standard ring. Only "
6463 "%d out of %d buffers were allocated "
6464 "successfully\n", i, tp->rx_pending);
/* Jumbo ring setup (skipped on 5780-class or non-jumbo chips). */
6472 if (!(tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) ||
6473 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
6476 memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
6478 if (!(tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE))
6481 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
6482 struct tg3_rx_buffer_desc *rxd;
6484 rxd = &tpr->rx_jmb[i].std;
6485 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
6486 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
6488 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
6489 (i << RXD_OPAQUE_INDEX_SHIFT));
6492 for (i = 0; i < tp->rx_jumbo_pending; i++) {
6493 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
6494 netdev_warn(tp->dev,
6495 "Using a smaller RX jumbo ring. Only %d "
6496 "out of %d buffers were allocated "
6497 "successfully\n", i, tp->rx_jumbo_pending);
6500 tp->rx_jumbo_pending = i;
/* Error path: release whatever was allocated. */
6509 tg3_rx_prodring_free(tp, tpr);
/* Release all memory owned by one RX producer ring set: the kmalloc'd
 * shadow buffer arrays (pointers NULLed to guard against double free;
 * kfree(NULL) is a no-op) and the coherent DMA descriptor rings.
 */
6513 static void tg3_rx_prodring_fini(struct tg3 *tp,
6514 struct tg3_rx_prodring_set *tpr)
6516 kfree(tpr->rx_std_buffers);
6517 tpr->rx_std_buffers = NULL;
6518 kfree(tpr->rx_jmb_buffers);
6519 tpr->rx_jmb_buffers = NULL;
6521 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
6522 tpr->rx_std, tpr->rx_std_mapping);
6526 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
6527 tpr->rx_jmb, tpr->rx_jmb_mapping);
/* Allocate the memory backing one RX producer ring set: zeroed shadow
 * buffer arrays (kzalloc) plus coherent DMA descriptor rings.  The
 * jumbo ring is only allocated on jumbo-capable, non-5780-class chips.
 * On any allocation failure everything is unwound through
 * tg3_rx_prodring_fini().
 */
6532 static int tg3_rx_prodring_init(struct tg3 *tp,
6533 struct tg3_rx_prodring_set *tpr)
6535 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
6537 if (!tpr->rx_std_buffers)
6540 tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
6541 TG3_RX_STD_RING_BYTES(tp),
6542 &tpr->rx_std_mapping,
6547 if ((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) &&
6548 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
6549 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
6551 if (!tpr->rx_jmb_buffers)
6554 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
6555 TG3_RX_JMB_RING_BYTES(tp),
6556 &tpr->rx_jmb_mapping,
/* Error path: free whatever was allocated above. */
6565 tg3_rx_prodring_fini(tp, tpr);
6569 /* Free up pending packets in all rx/tx rings.
6571 * The chip has been shut down and the driver detached from
6572 * the networking, so no interrupts or new tx packets will
6573 * end up in the driver. tp->{tx,}lock is not held and we are not
6574 * in an interrupt context and thus may sleep.
/* Drop every pending packet across all NAPI vectors: free the RX
 * producer rings, then walk each TX ring unmapping the head DMA
 * mapping and every fragment page before freeing the SKB.
 * dev_kfree_skb_any() is used because callers may be in softirq
 * context.  NOTE(review): loop-index advancement lines are elided in
 * this extract — verify against the full source.
 */
6576 static void tg3_free_rings(struct tg3 *tp)
6580 for (j = 0; j < tp->irq_cnt; j++) {
6581 struct tg3_napi *tnapi = &tp->napi[j];
6583 tg3_rx_prodring_free(tp, &tnapi->prodring);
/* Vectors without a TX ring (e.g. when TSS routes TX elsewhere). */
6585 if (!tnapi->tx_buffers)
6588 for (i = 0; i < TG3_TX_RING_SIZE; ) {
6589 struct ring_info *txp;
6590 struct sk_buff *skb;
6593 txp = &tnapi->tx_buffers[i];
6601 pci_unmap_single(tp->pdev,
6602 dma_unmap_addr(txp, mapping),
/* Unmap each paged fragment; index wraps with the ring mask. */
6609 for (k = 0; k < skb_shinfo(skb)->nr_frags; k++) {
6610 txp = &tnapi->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
6611 pci_unmap_page(tp->pdev,
6612 dma_unmap_addr(txp, mapping),
6613 skb_shinfo(skb)->frags[k].size,
6618 dev_kfree_skb_any(skb);
6623 /* Initialize tx/rx rings for packet processing.
6625 * The chip has been shut down and the driver detached from
6626 * the networking, so no interrupts or new tx packets will
6627 * end up in the driver. tp->{tx,}lock are held and thus
/* Reset all TX/RX rings to a clean post-reset state: clear per-vector
 * interrupt tags and the hardware status block, zero the TX and RX
 * return-completion rings, then repopulate each RX producer ring set.
 */
6630 static int tg3_init_rings(struct tg3 *tp)
6634 /* Free up all the SKBs. */
6637 for (i = 0; i < tp->irq_cnt; i++) {
6638 struct tg3_napi *tnapi = &tp->napi[i];
6640 tnapi->last_tag = 0;
6641 tnapi->last_irq_tag = 0;
6642 tnapi->hw_status->status = 0;
6643 tnapi->hw_status->status_tag = 0;
6644 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6649 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
6651 tnapi->rx_rcb_ptr = 0;
6653 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
/* Failure here presumably triggers full teardown — confirm the elided
 * error-handling lines in the full source. */
6655 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
6665 * Must not be invoked with interrupt sources disabled and
6666 * the hardware shutdown down.
/* Free all coherent-DMA and kmalloc'd state for every NAPI vector
 * (TX ring + shadow buffers, RX return ring, producer ring sets,
 * status block) and finally the shared hardware statistics block.
 * Pointers are NULLed after freeing so the function is safe to call
 * on a partially-allocated device (the alloc error path relies on
 * this).
 */
6668 static void tg3_free_consistent(struct tg3 *tp)
6672 for (i = 0; i < tp->irq_cnt; i++) {
6673 struct tg3_napi *tnapi = &tp->napi[i];
6675 if (tnapi->tx_ring) {
6676 dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
6677 tnapi->tx_ring, tnapi->tx_desc_mapping);
6678 tnapi->tx_ring = NULL;
6681 kfree(tnapi->tx_buffers);
6682 tnapi->tx_buffers = NULL;
6684 if (tnapi->rx_rcb) {
6685 dma_free_coherent(&tp->pdev->dev,
6686 TG3_RX_RCB_RING_BYTES(tp),
6688 tnapi->rx_rcb_mapping);
6689 tnapi->rx_rcb = NULL;
6692 tg3_rx_prodring_fini(tp, &tnapi->prodring);
6694 if (tnapi->hw_status) {
6695 dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
6697 tnapi->status_mapping);
6698 tnapi->hw_status = NULL;
6703 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
6704 tp->hw_stats, tp->stats_mapping);
6705 tp->hw_stats = NULL;
6710 * Must not be invoked with interrupt sources disabled and
6711 * the hardware shutdown down. Can sleep.
/* Allocate all coherent-DMA and kmalloc'd state the device needs:
 * the shared hardware statistics block, then per NAPI vector the
 * status block, RX producer ring set, and — depending on the TSS/RSS
 * multivector configuration — TX and RX-return resources.  On any
 * failure everything is unwound via tg3_free_consistent().
 */
6713 static int tg3_alloc_consistent(struct tg3 *tp)
6717 tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
6718 sizeof(struct tg3_hw_stats),
6724 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
6726 for (i = 0; i < tp->irq_cnt; i++) {
6727 struct tg3_napi *tnapi = &tp->napi[i];
6728 struct tg3_hw_status *sblk;
6730 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
6732 &tnapi->status_mapping,
6734 if (!tnapi->hw_status)
6737 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6738 sblk = tnapi->hw_status;
6740 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
6743 /* If multivector TSS is enabled, vector 0 does not handle
6744 * tx interrupts. Don't allocate any resources for it.
/* i.e. allocate TX resources for vector 0 when TSS is off, or for
 * vectors > 0 when TSS is on. */
6746 if ((!i && !(tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)) ||
6747 (i && (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS))) {
6748 tnapi->tx_buffers = kzalloc(sizeof(struct ring_info) *
6751 if (!tnapi->tx_buffers)
6754 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
6756 &tnapi->tx_desc_mapping,
6758 if (!tnapi->tx_ring)
6763 * When RSS is enabled, the status block format changes
6764 * slightly. The "rx_jumbo_consumer", "reserved",
6765 * and "rx_mini_consumer" members get mapped to the
6766 * other three rx return ring producer indexes.
6770 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
6773 tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
6776 tnapi->rx_rcb_prod_idx = &sblk->reserved;
6779 tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
6784 * If multivector RSS is enabled, vector 0 does not handle
6785 * rx or tx interrupts. Don't allocate any resources for it.
6787 if (!i && (tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS))
6790 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
6791 TG3_RX_RCB_RING_BYTES(tp),
6792 &tnapi->rx_rcb_mapping,
6797 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp))
6803 tg3_free_consistent(tp);
6807 #define MAX_WAIT_CNT 1000
6809 /* To stop a block, clear the enable bit and poll till it
6810 * clears. tp->lock is held.
/* Disable one hardware block: clear its enable bit and poll (up to
 * MAX_WAIT_CNT iterations) until the bit reads back clear.  On timeout
 * an error is logged unless @silent.  Caller holds tp->lock.
 */
6812 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
6817 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
6824 /* We can't enable/disable these bits of the
6825 * 5705/5750, just say success.
6838 for (i = 0; i < MAX_WAIT_CNT; i++) {
6841 if ((val & enable_bit) == 0)
6845 if (i == MAX_WAIT_CNT && !silent) {
6846 dev_err(&tp->pdev->dev,
6847 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
6855 /* tp->lock is held. */
/* Quiesce the whole MAC pipeline in order: disable interrupts and the
 * RX MAC, stop every receive- and send-side DMA/buffer block, disable
 * the TX MAC (polling for TX_MODE_ENABLE to clear), stop host
 * coalescing / write DMA / buffer manager / memory arbiter, pulse the
 * FTQ reset, and finally clear all status and statistics blocks.
 * Individual tg3_stop_block() failures are OR-ed into @err rather
 * than aborting, so the shutdown always runs to completion.
 * Caller holds tp->lock.
 */
6856 static int tg3_abort_hw(struct tg3 *tp, int silent)
6860 tg3_disable_ints(tp);
6862 tp->rx_mode &= ~RX_MODE_ENABLE;
6863 tw32_f(MAC_RX_MODE, tp->rx_mode);
6866 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
6867 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
6868 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
6869 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
6870 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
6871 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
6873 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
6874 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
6875 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
6876 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
6877 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
6878 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
6879 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
6881 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
6882 tw32_f(MAC_MODE, tp->mac_mode);
6885 tp->tx_mode &= ~TX_MODE_ENABLE;
6886 tw32_f(MAC_TX_MODE, tp->tx_mode);
/* TX MAC disable is not self-clearing via tg3_stop_block; poll here. */
6888 for (i = 0; i < MAX_WAIT_CNT; i++) {
6890 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
6893 if (i >= MAX_WAIT_CNT) {
6894 dev_err(&tp->pdev->dev,
6895 "%s timed out, TX_MODE_ENABLE will not clear "
6896 "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
6900 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
6901 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
6902 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
6904 tw32(FTQ_RESET, 0xffffffff);
6905 tw32(FTQ_RESET, 0x00000000);
6907 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
6908 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
6910 for (i = 0; i < tp->irq_cnt; i++) {
6911 struct tg3_napi *tnapi = &tp->napi[i];
6912 if (tnapi->hw_status)
6913 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6916 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
/* Deliver an event to the APE management processor.  Bails out early
 * for NCSI firmware, when the APE segment signature is wrong, or when
 * the APE firmware is not ready.  Otherwise waits (up to ~1 ms, 10
 * polls) for any previous event to be consumed, posts the new event
 * with the PENDING bit under the APE memory lock, and rings APE_EVENT_1.
 */
6921 static void tg3_ape_send_event(struct tg3 *tp, u32 event)
6926 /* NCSI does not support APE events */
6927 if (tp->tg3_flags3 & TG3_FLG3_APE_HAS_NCSI)
6930 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
6931 if (apedata != APE_SEG_SIG_MAGIC)
6934 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
6935 if (!(apedata & APE_FW_STATUS_READY))
6938 /* Wait for up to 1 millisecond for APE to service previous event. */
6939 for (i = 0; i < 10; i++) {
6940 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
6943 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS)
6945 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6946 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
6947 event | APE_EVENT_STATUS_EVENT_PENDING);
6949 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
6951 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6957 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6958 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
/* Tell the APE firmware about a driver lifecycle transition.
 * RESET_KIND_INIT: publish host segment signature/length, bump the
 * init counter, record driver ID and behavior flags, mark state
 * STARTED.  RESET_KIND_SHUTDOWN: wipe the host segment signature
 * (forces APE to assume OS-absent) and record WOL vs UNLOAD state.
 * RESET_KIND_SUSPEND: just report SUSPEND.  The chosen event is then
 * sent via tg3_ape_send_event().  No-op unless APE is enabled.
 */
6961 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
6966 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
6970 case RESET_KIND_INIT:
6971 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
6972 APE_HOST_SEG_SIG_MAGIC);
6973 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
6974 APE_HOST_SEG_LEN_MAGIC);
6975 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
6976 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
6977 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
6978 APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
6979 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
6980 APE_HOST_BEHAV_NO_PHYLOCK);
6981 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
6982 TG3_APE_HOST_DRVR_STATE_START);
6984 event = APE_EVENT_STATUS_STATE_START;
6986 case RESET_KIND_SHUTDOWN:
6987 /* With the interface we are currently using,
6988 * APE does not track driver state. Wiping
6989 * out the HOST SEGMENT SIGNATURE forces
6990 * the APE to assume OS absent status.
6992 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
6994 if (device_may_wakeup(&tp->pdev->dev) &&
6995 (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)) {
6996 tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
6997 TG3_APE_HOST_WOL_SPEED_AUTO);
6998 apedata = TG3_APE_HOST_DRVR_STATE_WOL;
7000 apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;
7002 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);
7004 event = APE_EVENT_STATUS_STATE_UNLOAD;
7006 case RESET_KIND_SUSPEND:
7007 event = APE_EVENT_STATUS_STATE_SUSPEND;
7013 event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
7015 tg3_ape_send_event(tp, event);
7018 /* tp->lock is held. */
/* Signal firmware before a chip reset: write the firmware mailbox
 * magic, and — when the new ASF handshake is in use — record the
 * pending driver state (start/unload/suspend) in the driver-state
 * mailbox.  APE is notified for INIT and SUSPEND here; SHUTDOWN is
 * reported post-reset instead (see tg3_write_sig_post_reset).
 * Caller holds tp->lock.
 */
7019 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
7021 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
7022 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
7024 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
7026 case RESET_KIND_INIT:
7027 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7031 case RESET_KIND_SHUTDOWN:
7032 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7036 case RESET_KIND_SUSPEND:
7037 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7046 if (kind == RESET_KIND_INIT ||
7047 kind == RESET_KIND_SUSPEND)
7048 tg3_ape_driver_state_change(tp, kind);
7051 /* tp->lock is held. */
/* Signal firmware after a chip reset completed: under the new ASF
 * handshake, write the corresponding *_DONE driver state.  The APE is
 * told about SHUTDOWN only at this point (INIT/SUSPEND were reported
 * pre-reset).  Caller holds tp->lock.
 */
7052 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
7054 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
7056 case RESET_KIND_INIT:
7057 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7058 DRV_STATE_START_DONE);
7061 case RESET_KIND_SHUTDOWN:
7062 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7063 DRV_STATE_UNLOAD_DONE);
7071 if (kind == RESET_KIND_SHUTDOWN)
7072 tg3_ape_driver_state_change(tp, kind);
7075 /* tp->lock is held. */
/* Legacy ASF handshake: when ASF is enabled, record the driver state
 * transition in the firmware driver-state mailbox (values elided in
 * this extract per case).  Caller holds tp->lock.
 */
7076 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
7078 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
7080 case RESET_KIND_INIT:
7081 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7085 case RESET_KIND_SHUTDOWN:
7086 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7090 case RESET_KIND_SUSPEND:
7091 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
/* Wait for on-chip firmware to finish booting after a reset.
 * 5906: poll VCPU_STATUS for INIT_DONE (up to ~20 ms).  Others: poll
 * the firmware mailbox for the inverted MAGIC1 handshake value.  A
 * timeout is NOT an error — some boards (e.g. Sun onboard parts) ship
 * without firmware — but it is reported once.  57765 A0 gets an extra
 * settle delay.
 */
7101 static int tg3_poll_fw(struct tg3 *tp)
7106 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7107 /* Wait up to 20ms for init done. */
7108 for (i = 0; i < 200; i++) {
7109 if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
7116 /* Wait for firmware initialization to complete. */
7117 for (i = 0; i < 100000; i++) {
7118 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
7119 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
7124 /* Chip might not be fitted with firmware. Some Sun onboard
7125 * parts are configured like that. So don't signal the timeout
7126 * of the above loop as an error, but do report the lack of
7127 * running firmware once.
7130 !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
7131 tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;
7133 netdev_info(tp->dev, "No firmware running\n");
7136 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
7137 /* The 57765 A0 needs a little more
7138 * time to do some important work.
7146 /* Save PCI command register before chip reset */
/* Snapshot the PCI command register before a chip reset clobbers it;
 * tg3_restore_pci_state() writes it back afterwards.
 */
7147 static void tg3_save_pci_state(struct tg3 *tp)
7149 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
7152 /* Restore PCI state after chip reset */
/* Rebuild PCI config state after a chip reset: re-enable indirect
 * register access, restore PCISTATE (retry policy, APE access
 * windows), restore the saved PCI command register, cache line size /
 * latency timer (non-5785), PCIe read-request size, clear the PCI-X
 * relaxed-ordering bit, and on 5780-class chips re-enable MSI, which
 * the reset clears.
 */
7153 static void tg3_restore_pci_state(struct tg3 *tp)
7157 /* Re-enable indirect register accesses. */
7158 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7159 tp->misc_host_ctrl);
7161 /* Set MAX PCI retry to zero. */
7162 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
7163 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7164 (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
7165 val |= PCISTATE_RETRY_SAME_DMA;
7166 /* Allow reads and writes to the APE register and memory space. */
7167 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
7168 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7169 PCISTATE_ALLOW_APE_SHMEM_WR |
7170 PCISTATE_ALLOW_APE_PSPACE_WR;
7171 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
7173 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
7175 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
7176 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
7177 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
7179 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
7180 tp->pci_cacheline_sz);
7181 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
7186 /* Make sure PCI-X relaxed ordering bit is clear. */
7187 if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
7190 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7192 pcix_cmd &= ~PCI_X_CMD_ERO;
7193 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7197 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
7199 /* Chip reset on 5780 will reset MSI enable bit,
7200 * so need to restore it.
7202 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7205 pci_read_config_word(tp->pdev,
7206 tp->msi_cap + PCI_MSI_FLAGS,
7208 pci_write_config_word(tp->pdev,
7209 tp->msi_cap + PCI_MSI_FLAGS,
7210 ctrl | PCI_MSI_FLAGS_ENABLE);
7211 val = tr32(MSGINT_MODE);
7212 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
7217 static void tg3_stop_fw(struct tg3 *);
7219 /* tp->lock is held. */
/* Perform a full GRC core-clock chip reset and bring the device back
 * to a sane post-reset state.  Sequence: take the APE GRC lock, save
 * PCI state, quiesce IRQ handlers (CHIP_RESETTING flag + zeroed
 * status blocks + synchronize_irq), issue GRC_MISC_CFG_CORECLK_RESET
 * with chip-specific quirks, flush via a PCI config read, repair PCIe
 * link/DEVCTL state, restore PCI config, re-enable the memory
 * arbiter, restore GRC/MAC/clock modes, wait for firmware
 * (tg3_poll_fw), and re-probe ASF state from NVRAM shadow memory.
 * Returns 0 or the tg3_poll_fw() error.  Caller holds tp->lock.
 */
7220 static int tg3_chip_reset(struct tg3 *tp)
7223 void (*write_op)(struct tg3 *, u32, u32);
7228 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
7230 /* No matching tg3_nvram_unlock() after this because
7231 * chip reset below will undo the nvram lock.
7233 tp->nvram_lock_cnt = 0;
7235 /* GRC_MISC_CFG core clock reset will clear the memory
7236 * enable bit in PCI register 4 and the MSI enable bit
7237 * on some chips, so we save relevant registers here.
7239 tg3_save_pci_state(tp);
7241 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
7242 (tp->tg3_flags3 & TG3_FLG3_5755_PLUS))
7243 tw32(GRC_FASTBOOT_PC, 0);
7246 * We must avoid the readl() that normally takes place.
7247 * It locks machines, causes machine checks, and other
7248 * fun things. So, temporarily disable the 5701
7249 * hardware workaround, while we do the reset.
7251 write_op = tp->write32;
7252 if (write_op == tg3_write_flush_reg32)
7253 tp->write32 = tg3_write32;
7255 /* Prevent the irq handler from reading or writing PCI registers
7256 * during chip reset when the memory enable bit in the PCI command
7257 * register may be cleared. The chip does not generate interrupt
7258 * at this time, but the irq handler may still be called due to irq
7259 * sharing or irqpoll.
7261 tp->tg3_flags |= TG3_FLAG_CHIP_RESETTING;
7262 for (i = 0; i < tp->irq_cnt; i++) {
7263 struct tg3_napi *tnapi = &tp->napi[i];
7264 if (tnapi->hw_status) {
7265 tnapi->hw_status->status = 0;
7266 tnapi->hw_status->status_tag = 0;
7268 tnapi->last_tag = 0;
7269 tnapi->last_irq_tag = 0;
/* Make sure no in-flight handler is still touching the device. */
7273 for (i = 0; i < tp->irq_cnt; i++)
7274 synchronize_irq(tp->napi[i].irq_vec);
7276 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7277 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
7278 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
7282 val = GRC_MISC_CFG_CORECLK_RESET;
7284 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
7285 /* Force PCIe 1.0a mode */
7286 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7287 !(tp->tg3_flags3 & TG3_FLG3_57765_PLUS) &&
7288 tr32(TG3_PCIE_PHY_TSTCTL) ==
7289 (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
7290 tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
7292 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
7293 tw32(GRC_MISC_CFG, (1 << 29));
7298 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7299 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
7300 tw32(GRC_VCPU_EXT_CTRL,
7301 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
7304 /* Manage gphy power for all CPMU absent PCIe devices. */
7305 if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
7306 !(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT))
7307 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
7309 tw32(GRC_MISC_CFG, val);
7311 /* restore 5701 hardware bug workaround write method */
7312 tp->write32 = write_op;
7314 /* Unfortunately, we have to delay before the PCI read back.
7315 * Some 575X chips even will not respond to a PCI cfg access
7316 * when the reset command is given to the chip.
7318 * How do these hardware designers expect things to work
7319 * properly if the PCI write is posted for a long period
7320 * of time? It is always necessary to have some method by
7321 * which a register read back can occur to push the write
7322 * out which does the reset.
7324 * For most tg3 variants the trick below was working.
7329 /* Flush PCI posted writes. The normal MMIO registers
7330 * are inaccessible at this time so this is the only
7331 * way to make this reliably (actually, this is no longer
7332 * the case, see above). I tried to use indirect
7333 * register read/write but this upset some 5701 variants.
7335 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
7339 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) && tp->pcie_cap) {
7342 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
7346 /* Wait for link training to complete. */
7347 for (i = 0; i < 5000; i++)
7350 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
7351 pci_write_config_dword(tp->pdev, 0xc4,
7352 cfg_val | (1 << 15));
7355 /* Clear the "no snoop" and "relaxed ordering" bits. */
7356 pci_read_config_word(tp->pdev,
7357 tp->pcie_cap + PCI_EXP_DEVCTL,
7359 val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
7360 PCI_EXP_DEVCTL_NOSNOOP_EN);
7362 * Older PCIe devices only support the 128 byte
7363 * MPS setting. Enforce the restriction.
7365 if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT))
7366 val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
7367 pci_write_config_word(tp->pdev,
7368 tp->pcie_cap + PCI_EXP_DEVCTL,
7371 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
7373 /* Clear error status */
7374 pci_write_config_word(tp->pdev,
7375 tp->pcie_cap + PCI_EXP_DEVSTA,
7376 PCI_EXP_DEVSTA_CED |
7377 PCI_EXP_DEVSTA_NFED |
7378 PCI_EXP_DEVSTA_FED |
7379 PCI_EXP_DEVSTA_URD);
7382 tg3_restore_pci_state(tp);
7384 tp->tg3_flags &= ~(TG3_FLAG_CHIP_RESETTING |
7385 TG3_FLAG_ERROR_PROCESSED);
7388 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
7389 val = tr32(MEMARB_MODE);
7390 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
7392 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
7394 tw32(0x5000, 0x400);
7397 tw32(GRC_MODE, tp->grc_mode);
7399 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
7402 tw32(0xc4, val | (1 << 15));
7405 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
7406 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7407 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
7408 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
7409 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
7410 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7413 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
7414 tp->mac_mode = MAC_MODE_APE_TX_EN |
7415 MAC_MODE_APE_RX_EN |
7416 MAC_MODE_TDE_ENABLE;
7418 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
7419 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
7421 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
7422 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
7427 tw32_f(MAC_MODE, val);
7430 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
7432 err = tg3_poll_fw(tp);
7438 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
7439 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
7440 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7441 !(tp->tg3_flags3 & TG3_FLG3_57765_PLUS)) {
7444 tw32(0x7c00, val | (1 << 25));
7447 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
7448 val = tr32(TG3_CPMU_CLCK_ORIDE);
7449 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
7452 /* Reprobe ASF enable state. */
7453 tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
7454 tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
7455 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
7456 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
7459 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
7460 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
7461 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
7462 tp->last_event_jiffies = jiffies;
7463 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
7464 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
7471 /* tp->lock is held. */
/* Ask ASF firmware to pause: wait for the RX CPU to ACK any previous
 * event, post FWCMD_NICDRV_PAUSE_FW in the command mailbox, fire the
 * firmware event, then wait for the ACK.  No-op unless ASF is enabled
 * without APE.  Caller holds tp->lock.
 */
7472 static void tg3_stop_fw(struct tg3 *tp)
7474 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
7475 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
7476 /* Wait for RX cpu to ACK the previous event. */
7477 tg3_wait_for_event_ack(tp);
7479 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
7481 tg3_generate_fw_event(tp);
7483 /* Wait for RX cpu to ACK this event. */
7484 tg3_wait_for_event_ack(tp);
7488 /* tp->lock is held. */
/* Full orderly shutdown: pre-reset firmware signature, hardware
 * abort, chip reset, MAC address restore, then legacy and post-reset
 * firmware signatures.  Returns the tg3_chip_reset() result.
 * Caller holds tp->lock.
 */
7489 static int tg3_halt(struct tg3 *tp, int kind, int silent)
7495 tg3_write_sig_pre_reset(tp, kind);
7497 tg3_abort_hw(tp, silent);
7498 err = tg3_chip_reset(tp);
7500 __tg3_set_mac_addr(tp, 0);
7502 tg3_write_sig_legacy(tp, kind);
7503 tg3_write_sig_post_reset(tp, kind);
7511 #define RX_CPU_SCRATCH_BASE 0x30000
7512 #define RX_CPU_SCRATCH_SIZE 0x04000
7513 #define TX_CPU_SCRATCH_BASE 0x34000
7514 #define TX_CPU_SCRATCH_SIZE 0x04000
7516 /* tp->lock is held. */
/* Halt the on-chip RX or TX CPU at @offset (RX_CPU_BASE/TX_CPU_BASE).
 * 5705+ parts have no TX CPU — asserting via BUG_ON.  5906 uses the
 * VCPU halt bit instead of the CPU_MODE register.  Otherwise write
 * CPU_MODE_HALT and poll up to 10000 times for it to stick (the RX
 * CPU path issues a final flushed halt after its poll loop).  Also
 * clears firmware's NVRAM arbitration request.  Caller holds tp->lock.
 */
7517 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
7521 BUG_ON(offset == TX_CPU_BASE &&
7522 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
7524 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7525 u32 val = tr32(GRC_VCPU_EXT_CTRL);
7527 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
7530 if (offset == RX_CPU_BASE) {
7531 for (i = 0; i < 10000; i++) {
7532 tw32(offset + CPU_STATE, 0xffffffff);
7533 tw32(offset + CPU_MODE, CPU_MODE_HALT);
7534 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
7538 tw32(offset + CPU_STATE, 0xffffffff);
7539 tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
7542 for (i = 0; i < 10000; i++) {
7543 tw32(offset + CPU_STATE, 0xffffffff);
7544 tw32(offset + CPU_MODE, CPU_MODE_HALT);
7545 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
7551 netdev_err(tp->dev, "%s timed out, %s CPU\n",
7552 __func__, offset == RX_CPU_BASE ? "RX" : "TX");
7556 /* Clear firmware's nvram arbitration. */
7557 if (tp->tg3_flags & TG3_FLAG_NVRAM)
7558 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
7563 unsigned int fw_base;
7564 unsigned int fw_len;
7565 const __be32 *fw_data;
7568 /* tp->lock is held. */
/* Copy a firmware image into a CPU's scratch memory.  Refuses TX CPU
 * loads on 5705+ (no TX CPU).  Takes the NVRAM lock before halting
 * the CPU because bootcode may still be loading, zeroes the scratch
 * area, holds the CPU in HALT, then writes the big-endian firmware
 * words at the image's base offset.  Write method is tg3_write_mem on
 * 5705+, indirect register writes otherwise.  Caller holds tp->lock.
 */
7569 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
7570 int cpu_scratch_size, struct fw_info *info)
7572 int err, lock_err, i;
7573 void (*write_op)(struct tg3 *, u32, u32);
7575 if (cpu_base == TX_CPU_BASE &&
7576 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7578 "%s: Trying to load TX cpu firmware which is 5705\n",
7583 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
7584 write_op = tg3_write_mem;
7586 write_op = tg3_write_indirect_reg32;
7588 /* It is possible that bootcode is still loading at this point.
7589 * Get the nvram lock first before halting the cpu.
7591 lock_err = tg3_nvram_lock(tp);
7592 err = tg3_halt_cpu(tp, cpu_base);
7594 tg3_nvram_unlock(tp);
7598 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
7599 write_op(tp, cpu_scratch_base + i, 0);
7600 tw32(cpu_base + CPU_STATE, 0xffffffff);
7601 tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
7602 for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
7603 write_op(tp, (cpu_scratch_base +
7604 (info->fw_base & 0xffff) +
7606 be32_to_cpu(info->fw_data[i]));
7614 /* tp->lock is held. */
/* Load the 5701 A0 workaround firmware into both the RX and TX CPUs,
 * then start only the RX CPU: set its PC to the firmware base and
 * retry up to 5 times (re-halting between attempts) until the PC
 * reads back correctly, then release HALT.  The firmware blob layout
 * is [version, base, length, data...] — fw_len excludes the 12-byte
 * header.
 */
7615 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
7617 struct fw_info info;
7618 const __be32 *fw_data;
7621 fw_data = (void *)tp->fw->data;
7623 /* Firmware blob starts with version numbers, followed by
7624 start address and length. We are setting complete length.
7625 length = end_address_of_bss - start_address_of_text.
7626 Remainder is the blob to be loaded contiguously
7627 from start address. */
7629 info.fw_base = be32_to_cpu(fw_data[1]);
7630 info.fw_len = tp->fw->size - 12;
7631 info.fw_data = &fw_data[3];
7633 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
7634 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
7639 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
7640 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
7645 /* Now startup only the RX cpu. */
7646 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7647 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
7649 for (i = 0; i < 5; i++) {
7650 if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
7652 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7653 tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
7654 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
7658 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
7659 "should be %08x\n", __func__,
7660 tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
/* Release the RX CPU from HALT so the firmware starts running. */
7663 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7664 tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
7669 /* tp->lock is held. */
/* Load the software-TSO firmware (skipped entirely when the chip has
 * hardware TSO).  5705 runs it on the RX CPU using the mbuf pool as
 * scratch; other chips use the TX CPU scratch area.  Startup mirrors
 * tg3_load_5701_a0_firmware_fix(): set PC, verify with up to 5
 * retries, then release HALT.
 */
7670 static int tg3_load_tso_firmware(struct tg3 *tp)
7672 struct fw_info info;
7673 const __be32 *fw_data;
7674 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
7677 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
7680 fw_data = (void *)tp->fw->data;
7682 /* Firmware blob starts with version numbers, followed by
7683 start address and length. We are setting complete length.
7684 length = end_address_of_bss - start_address_of_text.
7685 Remainder is the blob to be loaded contiguously
7686 from start address. */
7688 info.fw_base = be32_to_cpu(fw_data[1]);
7689 cpu_scratch_size = tp->fw_len;
7690 info.fw_len = tp->fw->size - 12;
7691 info.fw_data = &fw_data[3];
7693 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7694 cpu_base = RX_CPU_BASE;
7695 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
7697 cpu_base = TX_CPU_BASE;
7698 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
7699 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
7702 err = tg3_load_firmware_cpu(tp, cpu_base,
7703 cpu_scratch_base, cpu_scratch_size,
7708 /* Now startup the cpu. */
7709 tw32(cpu_base + CPU_STATE, 0xffffffff);
7710 tw32_f(cpu_base + CPU_PC, info.fw_base);
7712 for (i = 0; i < 5; i++) {
7713 if (tr32(cpu_base + CPU_PC) == info.fw_base)
7715 tw32(cpu_base + CPU_STATE, 0xffffffff);
7716 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
7717 tw32_f(cpu_base + CPU_PC, info.fw_base);
7722 "%s fails to set CPU PC, is %08x should be %08x\n",
7723 __func__, tr32(cpu_base + CPU_PC), info.fw_base);
/* Release the CPU from HALT so the TSO firmware starts running. */
7726 tw32(cpu_base + CPU_STATE, 0xffffffff);
7727 tw32_f(cpu_base + CPU_MODE, 0x00000000);
/* ndo_set_mac_address handler: validate and copy the new address into
 * dev->dev_addr; if the interface is running, program it into the MAC
 * under tp->lock.  When ASF is enabled, MAC address slot 1 is left
 * alone (skip_mac_1) if firmware has claimed it with its own distinct
 * non-zero address.
 */
7732 static int tg3_set_mac_addr(struct net_device *dev, void *p)
7734 struct tg3 *tp = netdev_priv(dev);
7735 struct sockaddr *addr = p;
7736 int err = 0, skip_mac_1 = 0;
7738 if (!is_valid_ether_addr(addr->sa_data))
7741 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7743 if (!netif_running(dev))
7746 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
7747 u32 addr0_high, addr0_low, addr1_high, addr1_low;
7749 addr0_high = tr32(MAC_ADDR_0_HIGH);
7750 addr0_low = tr32(MAC_ADDR_0_LOW);
7751 addr1_high = tr32(MAC_ADDR_1_HIGH);
7752 addr1_low = tr32(MAC_ADDR_1_LOW);
7754 /* Skip MAC addr 1 if ASF is using it. */
7755 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
7756 !(addr1_high == 0 && addr1_low == 0))
7759 spin_lock_bh(&tp->lock);
7760 __tg3_set_mac_addr(tp, skip_mac_1);
7761 spin_unlock_bh(&tp->lock);
7766 /* tp->lock is held. */
/* Program one NIC-SRAM BD-info record: 64-bit host ring DMA address
 * (high/low halves), maxlen/flags word, and — on pre-5705 chips only —
 * the NIC-local ring address.  Caller holds tp->lock.
 */
7767 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
7768 dma_addr_t mapping, u32 maxlen_flags,
7772 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
7773 ((u64) mapping >> 32));
7775 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
7776 ((u64) mapping & 0xffffffff));
7778 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
7781 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7783 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
7787 static void __tg3_set_rx_mode(struct net_device *);
/* Write ethtool coalescing parameters into the host-coalescing block.
 * Vector-0 TX registers are used only when TSS is off, vector-0 RX
 * registers only when RSS is off (zeroed otherwise, as those vectors
 * don't handle that traffic).  Pre-5705 chips also get per-IRQ tick
 * and statistics-block coalescing (stats ticks forced off without
 * carrier).  The per-vector register banks are stride-0x18 from the
 * *_VEC1 bases; unused vectors up to irq_max are cleared.
 */
7788 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
7792 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)) {
7793 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
7794 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
7795 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
7797 tw32(HOSTCC_TXCOL_TICKS, 0);
7798 tw32(HOSTCC_TXMAX_FRAMES, 0);
7799 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
7802 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS)) {
7803 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
7804 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
7805 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
7807 tw32(HOSTCC_RXCOL_TICKS, 0);
7808 tw32(HOSTCC_RXMAX_FRAMES, 0);
7809 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
7812 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7813 u32 val = ec->stats_block_coalesce_usecs;
7815 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
7816 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
7818 if (!netif_carrier_ok(tp->dev))
7821 tw32(HOSTCC_STAT_COAL_TICKS, val);
7824 for (i = 0; i < tp->irq_cnt - 1; i++) {
7827 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
7828 tw32(reg, ec->rx_coalesce_usecs);
7829 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
7830 tw32(reg, ec->rx_max_coalesced_frames);
7831 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
7832 tw32(reg, ec->rx_max_coalesced_frames_irq);
7834 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS) {
7835 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
7836 tw32(reg, ec->tx_coalesce_usecs);
7837 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
7838 tw32(reg, ec->tx_max_coalesced_frames);
7839 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
7840 tw32(reg, ec->tx_max_coalesced_frames_irq);
7844 for (; i < tp->irq_max - 1; i++) {
7845 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
7846 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
7847 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
7849 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS) {
7850 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
7851 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
7852 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
/* Reset all send / receive-return ring control blocks and interrupt
 * mailboxes to a clean state, then re-program the BDINFO blocks and
 * status-block DMA addresses for every active NAPI vector.
 * The number of rings each chip family supports determines the `limit`
 * values used when disabling unused RCBs.
 * NOTE(review): various closing-brace / else lines are missing from
 * this excerpt; comments describe the visible statements only.
 */
7857 /* tp->lock is held. */
7858 static void tg3_rings_reset(struct tg3 *tp)
7861 u32 stblk, txrcb, rxrcb, limit;
7862 struct tg3_napi *tnapi = &tp->napi[0];
7864 /* Disable all transmit rings but the first. */
7865 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7866 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
7867 else if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)
7868 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
7869 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
7870 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
7872 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
/* Mark every send RCB after the first as disabled. */
7874 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
7875 txrcb < limit; txrcb += TG3_BDINFO_SIZE)
7876 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
7877 BDINFO_FLAGS_DISABLED);
7880 /* Disable all receive return rings but the first. */
7881 if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)
7882 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
7883 else if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7884 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
7885 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
7886 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
7887 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
7889 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
7891 for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
7892 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
7893 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
7894 BDINFO_FLAGS_DISABLED);
7896 /* Disable interrupts */
7897 tw32_mailbox_f(tp->napi[0].int_mbox, 1);
7899 /* Zero mailbox registers. */
7900 if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX) {
7901 for (i = 1; i < tp->irq_max; i++) {
7902 tp->napi[i].tx_prod = 0;
7903 tp->napi[i].tx_cons = 0;
/* Per-vector TX producer mailboxes only exist when TSS is on. */
7904 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
7905 tw32_mailbox(tp->napi[i].prodmbox, 0);
7906 tw32_rx_mbox(tp->napi[i].consmbox, 0);
7907 tw32_mailbox_f(tp->napi[i].int_mbox, 1);
7909 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS))
7910 tw32_mailbox(tp->napi[0].prodmbox, 0);
7912 tp->napi[0].tx_prod = 0;
7913 tp->napi[0].tx_cons = 0;
7914 tw32_mailbox(tp->napi[0].prodmbox, 0);
7915 tw32_rx_mbox(tp->napi[0].consmbox, 0);
7918 /* Make sure the NIC-based send BD rings are disabled. */
7919 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7920 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
7921 for (i = 0; i < 16; i++)
7922 tw32_tx_mbox(mbox + i * 8, 0);
7925 txrcb = NIC_SRAM_SEND_RCB;
7926 rxrcb = NIC_SRAM_RCV_RET_RCB;
7928 /* Clear status block in ram. */
7929 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7931 /* Set status block DMA address */
7932 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7933 ((u64) tnapi->status_mapping >> 32));
7934 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7935 ((u64) tnapi->status_mapping & 0xffffffff));
/* Re-program vector 0's TX and RX-return BDINFO blocks (if present). */
7937 if (tnapi->tx_ring) {
7938 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
7939 (TG3_TX_RING_SIZE <<
7940 BDINFO_FLAGS_MAXLEN_SHIFT),
7941 NIC_SRAM_TX_BUFFER_DESC);
7942 txrcb += TG3_BDINFO_SIZE;
7945 if (tnapi->rx_rcb) {
7946 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
7947 (tp->rx_ret_ring_mask + 1) <<
7948 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
7949 rxrcb += TG3_BDINFO_SIZE;
/* Same programming for the remaining MSI-X vectors; each has its own
 * status block register bank starting at HOSTCC_STATBLCK_RING1. */
7952 stblk = HOSTCC_STATBLCK_RING1;
7954 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
7955 u64 mapping = (u64)tnapi->status_mapping;
7956 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
7957 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
7959 /* Clear status block in ram. */
7960 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7962 if (tnapi->tx_ring) {
7963 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
7964 (TG3_TX_RING_SIZE <<
7965 BDINFO_FLAGS_MAXLEN_SHIFT),
7966 NIC_SRAM_TX_BUFFER_DESC);
7967 txrcb += TG3_BDINFO_SIZE;
7970 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
7971 ((tp->rx_ret_ring_mask + 1) <<
7972 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
7975 rxrcb += TG3_BDINFO_SIZE;
/* Program the RX buffer-descriptor replenish thresholds for the
 * standard ring and (when the device is jumbo-capable and not a 5780
 * class part) the jumbo ring. The threshold is the smaller of half the
 * chip's on-die BD cache and one eighth of the host's configured ring
 * depth (never less than 1). 57765+ parts additionally get a low-water
 * mark register set to the full BD cache size.
 * NOTE(review): `else` and early-return lines are missing from this
 * excerpt; the visible chip-family ladder selects bdcache_maxcnt.
 */
7979 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
7981 u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
/* Pick the standard-ring BD cache size for this chip family. */
7983 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS) ||
7984 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
7985 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
7986 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
7987 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
7988 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
7989 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
7990 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
7992 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
/* NIC-side limit is also capped by rx_std_max_post. */
7994 nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
7995 host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
7997 val = min(nic_rep_thresh, host_rep_thresh);
7998 tw32(RCVBDI_STD_THRESH, val);
8000 if (tp->tg3_flags3 & TG3_FLG3_57765_PLUS)
8001 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
/* No jumbo ring on non-jumbo-capable or 5780-class devices. */
8003 if (!(tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) ||
8004 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
8007 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
8008 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
8010 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5717;
8012 host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
8014 val = min(bdcache_maxcnt / 2, host_rep_thresh);
8015 tw32(RCVBDI_JUMBO_THRESH, val);
8017 if (tp->tg3_flags3 & TG3_FLG3_57765_PLUS)
8018 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
/* Full hardware bring-up: reset the chip, then program every major
 * block (CPMU/EEE, PCIe workarounds, buffer manager, RX BD rings,
 * coalescing engine, MAC, DMA engines, RSS, receive rules, PHY) in the
 * order the hardware requires. Returns 0 on success or a negative errno
 * from one of the sub-init steps. Statement order here is significant;
 * do not reorder register writes.
 * NOTE(review): many brace/else/return/udelay lines are missing from
 * this excerpt; comments annotate the visible statements only.
 */
8021 /* tp->lock is held. */
8022 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8024 u32 val, rdmac_mode;
8026 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
/* Quiesce: mask interrupts, signal firmware, abort prior activity. */
8028 tg3_disable_ints(tp);
8032 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
8034 if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)
8035 tg3_abort_hw(tp, 1);
8037 /* Enable MAC control of LPI */
8038 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
8039 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
8040 TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
8041 TG3_CPMU_EEE_LNKIDL_UART_IDL);
8043 tw32_f(TG3_CPMU_EEE_CTRL,
8044 TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
8046 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
8047 TG3_CPMU_EEEMD_LPI_IN_TX |
8048 TG3_CPMU_EEEMD_LPI_IN_RX |
8049 TG3_CPMU_EEEMD_EEE_ENABLE;
8051 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8052 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
8054 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
8055 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
8057 tw32_f(TG3_CPMU_EEE_MODE, val);
/* EEE debounce timers. */
8059 tw32_f(TG3_CPMU_EEE_DBTMR1,
8060 TG3_CPMU_DBTMR1_PCIEXIT_2047US |
8061 TG3_CPMU_DBTMR1_LNKIDLE_2047US);
8063 tw32_f(TG3_CPMU_EEE_DBTMR2,
8064 TG3_CPMU_DBTMR2_APE_TX_2047US |
8065 TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
/* Core chip reset, then tell legacy firmware we are initializing. */
8071 err = tg3_chip_reset(tp);
8075 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
/* 5784 AX silicon: disable link-aware/idle power modes and force the
 * 6.25MHz MAC clock selections in several CPMU registers. */
8077 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
8078 val = tr32(TG3_CPMU_CTRL);
8079 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
8080 tw32(TG3_CPMU_CTRL, val);
8082 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8083 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8084 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8085 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8087 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
8088 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
8089 val |= CPMU_LNK_AWARE_MACCLK_6_25;
8090 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
8092 val = tr32(TG3_CPMU_HST_ACC);
8093 val &= ~CPMU_HST_ACC_MACCLK_MASK;
8094 val |= CPMU_HST_ACC_MACCLK_6_25;
8095 tw32(TG3_CPMU_HST_ACC, val);
/* 57780: PCIe ASPM/electrical-idle tuning and error-status clear. */
8098 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
8099 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
8100 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
8101 PCIE_PWR_MGMT_L1_THRESH_4MS;
8102 tw32(PCIE_PWR_MGMT_THRESH, val);
8104 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
8105 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
8107 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
8109 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8110 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
/* Enable L1 PLL power-down via the PL PCIE register window; GRC_MODE
 * is restored afterwards so the window selection is transient. */
8113 if (tp->tg3_flags3 & TG3_FLG3_L1PLLPD_EN) {
8114 u32 grc_mode = tr32(GRC_MODE);
8116 /* Access the lower 1K of PL PCIE block registers. */
8117 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8118 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8120 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
8121 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
8122 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
8124 tw32(GRC_MODE, grc_mode);
/* 57765 A0 errata: disable L2 CLKREQ through the same PL window, then
 * force the 6.25MHz 10Mb MAC clock. */
8127 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
8128 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
8129 u32 grc_mode = tr32(GRC_MODE);
8131 /* Access the lower 1K of PL PCIE block registers. */
8132 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8133 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8135 val = tr32(TG3_PCIE_TLDLPL_PORT +
8136 TG3_PCIE_PL_LO_PHYCTL5);
8137 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
8138 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
8140 tw32(GRC_MODE, grc_mode);
8143 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8144 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8145 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8146 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8149 /* This works around an issue with Athlon chipsets on
8150 * B3 tigon3 silicon. This bit has no effect on any
8151 * other revision. But do not set this on PCI Express
8152 * chips and don't even touch the clocks if the CPMU is present.
8154 if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)) {
8155 if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
8156 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
8157 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
/* 5704 A0 in PCI-X mode: retry the same DMA on error. */
8160 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
8161 (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
8162 val = tr32(TG3PCI_PCISTATE);
8163 val |= PCISTATE_RETRY_SAME_DMA;
8164 tw32(TG3PCI_PCISTATE, val);
8167 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
8168 /* Allow reads and writes to the
8169 * APE register and memory space.
8171 val = tr32(TG3PCI_PCISTATE);
8172 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8173 PCISTATE_ALLOW_APE_SHMEM_WR |
8174 PCISTATE_ALLOW_APE_PSPACE_WR;
8175 tw32(TG3PCI_PCISTATE, val);
8178 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
8179 /* Enable some hw fixes. */
8180 val = tr32(TG3PCI_MSI_DATA);
8181 val |= (1 << 26) | (1 << 28) | (1 << 29);
8182 tw32(TG3PCI_MSI_DATA, val);
8185 /* Descriptor ring init may make accesses to the
8186 * NIC SRAM area to setup the TX descriptors, so we
8187 * can only do this after the hardware has been
8188 * successfully reset.
8190 err = tg3_init_rings(tp);
/* DMA read/write control: chip-specific workaround bits merged with
 * the probe-time dma_rwctrl value (see tg3_test_dma). */
8194 if (tp->tg3_flags3 & TG3_FLG3_57765_PLUS) {
8195 val = tr32(TG3PCI_DMA_RW_CTRL) &
8196 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
8197 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
8198 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
8199 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765 &&
8200 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8201 val |= DMA_RWCTRL_TAGGED_STAT_WA;
8202 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
8203 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
8204 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
8205 /* This value is determined during the probe time DMA
8206 * engine test, tg3_test_dma.
8208 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8211 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
8212 GRC_MODE_4X_NIC_SEND_RINGS |
8213 GRC_MODE_NO_TX_PHDR_CSUM |
8214 GRC_MODE_NO_RX_PHDR_CSUM);
8215 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
8217 /* Pseudo-header checksum is done by hardware logic and not
8218 * the offload processers, so make the chip do the pseudo-
8219 * header checksums on receive. For transmit it is more
8220 * convenient to do the pseudo-header checksum in software
8221 * as Linux does that on transmit for us in all cases.
8223 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
8227 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
8229 /* Setup the timer prescalar register. Clock is always 66Mhz. */
8230 val = tr32(GRC_MISC_CFG);
8232 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
8233 tw32(GRC_MISC_CFG, val);
8235 /* Initialize MBUF/DESC pool. */
8236 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
8238 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
8239 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
8240 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8241 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
8243 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
8244 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
8245 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
8246 } else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
/* TSO firmware lives at the start of the 5705 mbuf pool; carve it out
 * (rounded up to 0x80) and shrink the pool accordingly. */
8249 fw_len = tp->fw_len;
8250 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
8251 tw32(BUFMGR_MB_POOL_ADDR,
8252 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
8253 tw32(BUFMGR_MB_POOL_SIZE,
8254 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
/* Buffer-manager watermarks: standard vs jumbo MTU profiles. */
8257 if (tp->dev->mtu <= ETH_DATA_LEN) {
8258 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8259 tp->bufmgr_config.mbuf_read_dma_low_water);
8260 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8261 tp->bufmgr_config.mbuf_mac_rx_low_water);
8262 tw32(BUFMGR_MB_HIGH_WATER,
8263 tp->bufmgr_config.mbuf_high_water);
8265 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8266 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
8267 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8268 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
8269 tw32(BUFMGR_MB_HIGH_WATER,
8270 tp->bufmgr_config.mbuf_high_water_jumbo);
8272 tw32(BUFMGR_DMA_LOW_WATER,
8273 tp->bufmgr_config.dma_low_water);
8274 tw32(BUFMGR_DMA_HIGH_WATER,
8275 tp->bufmgr_config.dma_high_water);
8277 val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
8278 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
8279 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
8280 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8281 tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8282 tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
8283 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
8284 tw32(BUFMGR_MODE, val);
/* Poll up to 2000 iterations for the buffer manager to come up; the
 * timeout error path is visible below (failure return line not shown). */
8285 for (i = 0; i < 2000; i++) {
8286 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
8291 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
8295 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
8296 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
8298 tg3_setup_rxbd_thresholds(tp);
8300 /* Initialize TG3_BDINFO's at:
8301 * RCVDBDI_STD_BD: standard eth size rx ring
8302 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
8303 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
8306 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
8307 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
8308 * ring attribute flags
8309 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
8311 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
8312 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
8314 * The size of each ring is fixed in the firmware, but the location is
8317 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8318 ((u64) tpr->rx_std_mapping >> 32));
8319 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8320 ((u64) tpr->rx_std_mapping & 0xffffffff));
8321 if (!(tp->tg3_flags3 & TG3_FLG3_5717_PLUS))
8322 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
8323 NIC_SRAM_RX_BUFFER_DESC);
8325 /* Disable the mini ring */
8326 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
8327 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
8328 BDINFO_FLAGS_DISABLED);
8330 /* Program the jumbo buffer descriptor ring control
8331 * blocks on those devices that have them.
8333 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8334 ((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) &&
8335 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))) {
8337 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
8338 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8339 ((u64) tpr->rx_jmb_mapping >> 32));
8340 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8341 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
8342 val = TG3_RX_JMB_RING_SIZE(tp) <<
8343 BDINFO_FLAGS_MAXLEN_SHIFT;
8344 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8345 val | BDINFO_FLAGS_USE_EXT_RECV);
8346 if (!(tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) ||
8347 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8348 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
8349 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
8351 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8352 BDINFO_FLAGS_DISABLED);
/* Standard ring maxlen/flags word is chip-family dependent. */
8355 if (tp->tg3_flags3 & TG3_FLG3_57765_PLUS) {
8356 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8357 val = TG3_RX_STD_MAX_SIZE_5700;
8359 val = TG3_RX_STD_MAX_SIZE_5717;
8360 val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
8361 val |= (TG3_RX_STD_DMA_SZ << 2);
8363 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
8365 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
8367 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
/* Publish initial producer indices to the chip. */
8369 tpr->rx_std_prod_idx = tp->rx_pending;
8370 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
8372 tpr->rx_jmb_prod_idx = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
8373 tp->rx_jumbo_pending : 0;
8374 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
8376 tg3_rings_reset(tp);
8378 /* Initialize MAC address and backoff seed. */
8379 __tg3_set_mac_addr(tp, 0);
8381 /* MTU + ethernet header + FCS + optional VLAN tag */
8382 tw32(MAC_RX_MTU_SIZE,
8383 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
8385 /* The slot time is changed by tg3_setup_phy if we
8386 * run at gigabit with half duplex.
8388 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
8389 (6 << TX_LENGTHS_IPG_SHIFT) |
8390 (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
8392 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8393 val |= tr32(MAC_TX_LENGTHS) &
8394 (TX_LENGTHS_JMB_FRM_LEN_MSK |
8395 TX_LENGTHS_CNT_DWN_VAL_MSK);
8397 tw32(MAC_TX_LENGTHS, val);
8399 /* Receive rules. */
8400 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
8401 tw32(RCVLPC_CONFIG, 0x0181);
8403 /* Calculate RDMAC_MODE setting early, we need it to determine
8404 * the RCVLPC_STATE_ENABLE mask.
8406 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
8407 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
8408 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
8409 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
8410 RDMAC_MODE_LNGREAD_ENAB);
8412 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
8413 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
8415 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8416 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8417 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8418 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
8419 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
8420 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
8422 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8423 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8424 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
8425 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
8426 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
8427 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8428 !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
8429 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8433 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
8434 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8436 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
8437 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
8439 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) ||
8440 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8441 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8442 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
8444 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8445 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
/* RDMA reserve-control / FIFO watermark tuning for newer parts. */
8447 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
8448 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8449 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8450 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
8451 (tp->tg3_flags3 & TG3_FLG3_57765_PLUS)) {
8452 val = tr32(TG3_RDMA_RSRVCTRL_REG);
8453 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8454 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8455 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
8456 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
8457 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
8458 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
8459 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
8460 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
8462 tw32(TG3_RDMA_RSRVCTRL_REG,
8463 val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
8466 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8467 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8468 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
8469 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
8470 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
8471 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
8474 /* Receive/send statistics. */
8475 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
8476 val = tr32(RCVLPC_STATS_ENABLE);
8477 val &= ~RCVLPC_STATSENAB_DACK_FIX;
8478 tw32(RCVLPC_STATS_ENABLE, val);
8479 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
8480 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
8481 val = tr32(RCVLPC_STATS_ENABLE);
8482 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
8483 tw32(RCVLPC_STATS_ENABLE, val);
8485 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
8487 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
8488 tw32(SNDDATAI_STATSENAB, 0xffffff);
8489 tw32(SNDDATAI_STATSCTRL,
8490 (SNDDATAI_SCTRL_ENABLE |
8491 SNDDATAI_SCTRL_FASTUPD));
8493 /* Setup host coalescing engine. */
8494 tw32(HOSTCC_MODE, 0);
/* Wait (bounded poll) for the coalescing engine to go quiescent
 * before re-programming it. */
8495 for (i = 0; i < 2000; i++) {
8496 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
8501 __tg3_set_coalesce(tp, &tp->coal);
8503 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
8504 /* Status/statistics block address. See tg3_timer,
8505 * the tg3_periodic_fetch_stats call there, and
8506 * tg3_get_stats to see how this works for 5705/5750 chips.
8508 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8509 ((u64) tp->stats_mapping >> 32));
8510 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8511 ((u64) tp->stats_mapping & 0xffffffff));
8512 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
8514 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
8516 /* Clear statistics and status block memory areas */
8517 for (i = NIC_SRAM_STATS_BLK;
8518 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
8520 tg3_write_mem(tp, i, 0);
8525 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
8527 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
8528 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
8529 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
8530 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
8532 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8533 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
8534 /* reset to prevent losing 1st rx packet intermittently */
8535 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
/* Build the MAC mode word; APE management keeps its TX/RX paths. */
8539 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
8540 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
8543 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
8544 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
8545 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
8546 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
8547 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
8548 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8549 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
8552 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
8553 * If TG3_FLG2_IS_NIC is zero, we should read the
8554 * register to preserve the GPIO settings for LOMs. The GPIOs,
8555 * whether used as inputs or outputs, are set by boot code after
8558 if (!(tp->tg3_flags2 & TG3_FLG2_IS_NIC)) {
8561 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
8562 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
8563 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
8565 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8566 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
8567 GRC_LCLCTRL_GPIO_OUTPUT3;
8569 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
8570 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
8572 tp->grc_local_ctrl &= ~gpio_mask;
8573 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
8575 /* GPIO1 must be driven high for eeprom write protect */
8576 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)
8577 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
8578 GRC_LCLCTRL_GPIO_OUTPUT1);
8580 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
/* Multi-vector MSI interrupt mode. */
8583 if ((tp->tg3_flags2 & TG3_FLG2_USING_MSIX) &&
8585 val = tr32(MSGINT_MODE);
8586 val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE;
8587 tw32(MSGINT_MODE, val);
8590 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
8591 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
/* Write DMA engine mode, mirroring the RDMAC error-enable bits. */
8595 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
8596 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
8597 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
8598 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
8599 WDMAC_MODE_LNGREAD_ENAB);
8601 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8602 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8603 if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
8604 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
8605 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
8607 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8608 !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
8609 val |= WDMAC_MODE_RX_ACCEL;
8613 /* Enable host coalescing bug fix */
8614 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
8615 val |= WDMAC_MODE_STATUS_TAG_FIX;
8617 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
8618 val |= WDMAC_MODE_BURST_ALL_DATA;
8620 tw32_f(WDMAC_MODE, val);
/* PCI-X: raise the max-read byte count in the PCI-X command word. */
8623 if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
8626 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8628 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
8629 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
8630 pcix_cmd |= PCI_X_CMD_READ_2K;
8631 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
8632 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
8633 pcix_cmd |= PCI_X_CMD_READ_2K;
8635 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8639 tw32_f(RDMAC_MODE, rdmac_mode);
/* Enable the remaining RX/TX datapath blocks. */
8642 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
8643 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
8644 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
8646 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
8648 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
8650 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
8652 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
8653 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
8654 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
8655 if (tp->tg3_flags3 & TG3_FLG3_LRG_PROD_RING_CAP)
8656 val |= RCVDBDI_MODE_LRG_RING_SZ;
8657 tw32(RCVDBDI_MODE, val);
8658 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
8659 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
8660 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
8661 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
8662 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
8663 val |= SNDBDI_MODE_MULTI_TXQ_EN;
8664 tw32(SNDBDI_MODE, val);
8665 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
/* Chip-specific firmware fixups (error paths not visible here). */
8667 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
8668 err = tg3_load_5701_a0_firmware_fix(tp);
8673 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
8674 err = tg3_load_tso_firmware(tp);
8679 tp->tx_mode = TX_MODE_ENABLE;
8681 if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
8682 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
8683 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
8685 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8686 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
8687 tp->tx_mode &= ~val;
8688 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
8691 tw32_f(MAC_TX_MODE, tp->tx_mode);
/* RSS: indirection table spreads flows across irq_cnt-1 RX vectors
 * (vector 0 is excluded), four table entries packed per register. */
8694 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS) {
8695 u32 reg = MAC_RSS_INDIR_TBL_0;
8696 u8 *ent = (u8 *)&val;
8698 /* Setup the indirection table */
8699 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
8700 int idx = i % sizeof(val);
8702 ent[idx] = i % (tp->irq_cnt - 1);
8703 if (idx == sizeof(val) - 1) {
8709 /* Setup the "secret" hash key. */
8710 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
8711 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
8712 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
8713 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
8714 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
8715 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
8716 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
8717 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
8718 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
8719 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
8722 tp->rx_mode = RX_MODE_ENABLE;
8723 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
8724 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
8726 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS)
8727 tp->rx_mode |= RX_MODE_RSS_ENABLE |
8728 RX_MODE_RSS_ITBL_HASH_BITS_7 |
8729 RX_MODE_RSS_IPV6_HASH_EN |
8730 RX_MODE_RSS_TCP_IPV6_HASH_EN |
8731 RX_MODE_RSS_IPV4_HASH_EN |
8732 RX_MODE_RSS_TCP_IPV4_HASH_EN;
8734 tw32_f(MAC_RX_MODE, tp->rx_mode);
8737 tw32(MAC_LED_CTRL, tp->led_ctrl);
8739 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
8740 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8741 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8744 tw32_f(MAC_RX_MODE, tp->rx_mode);
8747 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8748 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
8749 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
8750 /* Set drive transmission level to 1.2V */
8751 /* only if the signal pre-emphasis bit is not set */
8752 val = tr32(MAC_SERDES_CFG);
8755 tw32(MAC_SERDES_CFG, val);
8757 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
8758 tw32(MAC_SERDES_CFG, 0x616000);
8761 /* Prevent chip from dropping frames when flow control
8764 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8768 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
8770 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
8771 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
8772 /* Use hardware link auto-negotiation */
8773 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
8776 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8777 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
/* 5714 MII-SerDes: force SerDes signal detect and route it through
 * GRC local control instead of the external detect pin. */
8780 tmp = tr32(SERDES_RX_CTRL);
8781 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
8782 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
8783 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
8784 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8787 if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
8788 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
/* Leaving low-power: restore the pre-suspend link configuration. */
8789 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
8790 tp->link_config.speed = tp->link_config.orig_speed;
8791 tp->link_config.duplex = tp->link_config.orig_duplex;
8792 tp->link_config.autoneg = tp->link_config.orig_autoneg;
8795 err = tg3_setup_phy(tp, 0);
8799 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
8800 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8803 /* Clear CRC stats. */
8804 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
8805 tg3_writephy(tp, MII_TG3_TEST1,
8806 tmp | MII_TG3_TEST1_CRC_EN);
8807 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
8812 __tg3_set_rx_mode(tp->dev);
8814 /* Initialize receive rules. */
8815 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
8816 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
8817 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
8818 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
/* The number of usable rules depends on chip class and ASF; the limit
 * computation lines are not visible in this excerpt. The switch below
 * (case labels not visible) zeroes the unused rule slots, falling
 * through from the highest available rule downwards. */
8820 if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
8821 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
8825 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
8829 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
8831 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
8833 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
8835 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
8837 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
8839 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
8841 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
8843 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
8845 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
8847 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
8849 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
8851 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
8853 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
8855 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
8863 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
8864 /* Write our heartbeat update interval to APE. */
8865 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
8866 APE_HOST_HEARTBEAT_INT_DISABLE);
8868 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
8873 /* Called at device open time to get the chip ready for
8874 * packet processing. Invoked with tp->lock held.
/*
 * Select the proper core clock source, reset the PCI memory-window base
 * register to the start of NIC SRAM, then run the full hardware
 * reset/initialization sequence.  Returns tg3_reset_hw()'s status
 * (0 on success, negative errno on failure).
 */
8876 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
8878 tg3_switch_clocks(tp);
8880 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
8882 return tg3_reset_hw(tp, reset_phy);
/*
 * Fold the current value of the 32-bit hardware counter at REG into the
 * 64-bit software statistic PSTAT (a tg3_stat64_t with .low/.high words).
 * The unsigned comparison "(PSTAT)->low < __val" detects 32-bit wraparound
 * of the addition and propagates the carry into .high.
 */
8885 #define TG3_STAT_ADD32(PSTAT, REG) \
8886 do { u32 __val = tr32(REG); \
8887 (PSTAT)->low += __val; \
8888 if ((PSTAT)->low < __val) \
8889 (PSTAT)->high += 1; \
/*
 * Periodically (once per second, from tg3_timer) fold the chip's 32-bit
 * MAC TX/RX and receive-list-placement counters into the 64-bit software
 * copies in tp->hw_stats.  Skipped while the link is down, since the
 * counters cannot advance without carrier.
 */
8892 static void tg3_periodic_fetch_stats(struct tg3 *tp)
8894 struct tg3_hw_stats *sp = tp->hw_stats;
8896 if (!netif_carrier_ok(tp->dev))
8899 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
8900 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
8901 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
8902 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
8903 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
8904 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
8905 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
8906 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
8907 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
8908 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
8909 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
8910 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
8911 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
8913 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
8914 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
8915 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
8916 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
8917 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
8918 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
8919 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
8920 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
8921 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
8922 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
8923 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
8924 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
8925 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
8926 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
8928 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
/* 5717-class chips have no direct RX-discard counter; instead the
 * mbuf low-watermark attention bit in HOSTCC_FLOW_ATTN is sampled,
 * counted as a single discard event, and (presumably write-one-to-clear
 * -- TODO confirm against the register manual) acknowledged below. */
8929 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717) {
8930 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
8932 u32 val = tr32(HOSTCC_FLOW_ATTN);
8933 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
8935 tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
/* Manual 64-bit accumulate with carry, same idiom as TG3_STAT_ADD32. */
8936 sp->rx_discards.low += val;
8937 if (sp->rx_discards.low < val)
8938 sp->rx_discards.high += 1;
/* Mirror the discard count into the low-watermark-hit statistic. */
8940 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
8942 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
/*
 * Driver watchdog timer callback (tp->timer).  Runs under tp->lock and
 * handles: the non-tagged-IRQ race workaround, hung-DMA detection, the
 * once-per-second statistics fetch and link polling, and the ASF
 * firmware heartbeat, then re-arms itself.
 */
8945 static void tg3_timer(unsigned long __opaque)
8947 struct tg3 *tp = (struct tg3 *) __opaque;
8952 spin_lock(&tp->lock);
8954 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
8955 /* All of this garbage is because when using non-tagged
8956 * IRQ status the mailbox/status_block protocol the chip
8957 * uses with the cpu is race prone.
/* Status block says there is a pending update but no interrupt was
 * taken: force one via GRC_LCLCTRL_SETINT; otherwise kick the
 * coalescing engine to flush any in-flight status update. */
8959 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
8960 tw32(GRC_LOCAL_CTRL,
8961 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
8963 tw32(HOSTCC_MODE, tp->coalesce_mode |
8964 HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
/* Write-DMA engine unexpectedly disabled => chip is wedged; hand the
 * recovery off to the reset task.  NOTE(review): the unlock here is
 * followed by elided lines that presumably return without re-arming
 * the timer (RESTART_TIMER flag is set for the reset task) -- confirm
 * against the full source. */
8967 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
8968 tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
8969 spin_unlock(&tp->lock);
8970 schedule_work(&tp->reset_task);
8975 /* This part only runs once per second. */
8976 if (!--tp->timer_counter) {
8977 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
8978 tg3_periodic_fetch_stats(tp);
/* EEE: setlpicnt counts down seconds of stable link before LPI
 * (low-power idle) is enabled in the CPMU. */
8980 if (tp->setlpicnt && !--tp->setlpicnt) {
8981 u32 val = tr32(TG3_CPMU_EEE_MODE);
8982 tw32(TG3_CPMU_EEE_MODE,
8983 val | TG3_CPMU_EEEMD_LPI_ENABLE);
/* Link-change detection by polling the MAC status register, for
 * chips that cannot generate reliable link-change interrupts. */
8986 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
8990 mac_stat = tr32(MAC_STATUS);
8993 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
8994 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
8996 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
9000 tg3_setup_phy(tp, 0);
9001 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
9002 u32 mac_stat = tr32(MAC_STATUS);
9005 if (netif_carrier_ok(tp->dev) &&
9006 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
9009 if (!netif_carrier_ok(tp->dev) &&
9010 (mac_stat & (MAC_STATUS_PCS_SYNCED |
9011 MAC_STATUS_SIGNAL_DET))) {
9015 if (!tp->serdes_counter) {
9018 ~MAC_MODE_PORT_MODE_MASK));
9020 tw32_f(MAC_MODE, tp->mac_mode);
9023 tg3_setup_phy(tp, 0);
9025 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9026 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
9027 tg3_serdes_parallel_detect(tp);
9030 tp->timer_counter = tp->timer_multiplier;
9033 /* Heartbeat is only sent once every 2 seconds.
9035 * The heartbeat is to tell the ASF firmware that the host
9036 * driver is still alive. In the event that the OS crashes,
9037 * ASF needs to reset the hardware to free up the FIFO space
9038 * that may be filled with rx packets destined for the host.
9039 * If the FIFO is full, ASF will no longer function properly.
9041 * Unintended resets have been reported on real time kernels
9042 * where the timer doesn't run on time. Netpoll will also have
9045 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
9046 * to check the ring condition when the heartbeat is expiring
9047 * before doing the reset. This will prevent most unintended
9050 if (!--tp->asf_counter) {
9051 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
9052 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
9053 tg3_wait_for_event_ack(tp);
/* Post the ALIVE3 command (opcode, length, timeout payload) into
 * NIC SRAM mailboxes, then raise the firmware event. */
9055 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
9056 FWCMD_NICDRV_ALIVE3);
9057 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
9058 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
9059 TG3_FW_UPDATE_TIMEOUT_SEC);
9061 tg3_generate_fw_event(tp);
9063 tp->asf_counter = tp->asf_multiplier;
9066 spin_unlock(&tp->lock);
/* Re-arm for the next tick. */
9069 tp->timer.expires = jiffies + tp->timer_offset;
9070 add_timer(&tp->timer);
/*
 * Request the interrupt line for NAPI context irq_num.  Builds a
 * per-vector name ("ethX-N") when multiple vectors are in use, selects
 * the handler variant (one-shot MSI / tagged-status / legacy), and
 * returns request_irq()'s status.  Legacy INTx uses IRQF_SHARED.
 */
9073 static int tg3_request_irq(struct tg3 *tp, int irq_num)
9076 unsigned long flags;
9078 struct tg3_napi *tnapi = &tp->napi[irq_num];
/* Single vector: use the plain netdev name; otherwise format the
 * per-vector label into tnapi's own buffer (it must outlive the call,
 * since the kernel keeps the pointer for /proc/interrupts). */
9080 if (tp->irq_cnt == 1)
9081 name = tp->dev->name;
9083 name = &tnapi->irq_lbl[0];
9084 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
9085 name[IFNAMSIZ-1] = 0;
/* Handler selection; the MSI/MSI-X branches (elided here) presumably
 * pick non-shared handlers -- confirm against the full source. */
9088 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI_OR_MSIX) {
9090 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
9095 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
9096 fn = tg3_interrupt_tagged;
9097 flags = IRQF_SHARED;
9100 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
/*
 * Verify that the device can actually deliver an interrupt (used to
 * validate MSI delivery).  Swaps in a minimal test ISR, forces a
 * coalescing-now interrupt, and polls for evidence of delivery before
 * restoring the normal handler.  Returns 0 on success.
 */
9103 static int tg3_test_interrupt(struct tg3 *tp)
9105 struct tg3_napi *tnapi = &tp->napi[0];
9106 struct net_device *dev = tp->dev;
9107 int err, i, intr_ok = 0;
9110 if (!netif_running(dev))
9113 tg3_disable_ints(tp);
9115 free_irq(tnapi->irq_vec, tnapi);
9118 * Turn off MSI one shot mode. Otherwise this test has no
9119 * observable way to know whether the interrupt was delivered.
9121 if ((tp->tg3_flags3 & TG3_FLG3_57765_PLUS) &&
9122 (tp->tg3_flags2 & TG3_FLG2_USING_MSI)) {
9123 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
9124 tw32(MSGINT_MODE, val);
9127 err = request_irq(tnapi->irq_vec, tg3_test_isr,
9128 IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, tnapi);
9132 tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
9133 tg3_enable_ints(tp);
/* Force an immediate coalescing interrupt. */
9135 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
/* Poll up to 5 times: delivery is proven either by a non-zero
 * interrupt mailbox or by the PCI-interrupt mask bit the ISR sets. */
9138 for (i = 0; i < 5; i++) {
9139 u32 int_mbox, misc_host_ctrl;
9141 int_mbox = tr32_mailbox(tnapi->int_mbox);
9142 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
9144 if ((int_mbox != 0) ||
9145 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
/* Restore the normal interrupt handler. */
9153 tg3_disable_ints(tp);
9155 free_irq(tnapi->irq_vec, tnapi);
9157 err = tg3_request_irq(tp, 0);
9163 /* Reenable MSI one shot mode. */
9164 if ((tp->tg3_flags3 & TG3_FLG3_57765_PLUS) &&
9165 (tp->tg3_flags2 & TG3_FLG2_USING_MSI)) {
9166 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
9167 tw32(MSGINT_MODE, val);
9175 /* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
9176 * successfully restored
/*
 * Runs tg3_test_interrupt() while in MSI mode.  On MSI failure the
 * driver falls back to legacy INTx and fully resets the chip, because a
 * failed MSI cycle may have terminated with a PCI Master Abort.
 */
9178 static int tg3_test_msi(struct tg3 *tp)
9183 if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
9186 /* Turn off SERR reporting in case MSI terminates with Master
/* Save PCI_COMMAND so SERR can be restored after the test. */
9189 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9190 pci_write_config_word(tp->pdev, PCI_COMMAND,
9191 pci_cmd & ~PCI_COMMAND_SERR);
9193 err = tg3_test_interrupt(tp);
9195 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9200 /* other failures */
9204 /* MSI test failed, go back to INTx mode */
9205 netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
9206 "to INTx mode. Please report this failure to the PCI "
9207 "maintainer and include system chipset information\n");
9209 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9211 pci_disable_msi(tp->pdev);
9213 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
9214 tp->napi[0].irq_vec = tp->pdev->irq;
9216 err = tg3_request_irq(tp, 0);
9220 /* Need to reset the chip because the MSI cycle may have terminated
9221 * with Master Abort.
9223 tg3_full_lock(tp, 1);
9225 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9226 err = tg3_init_hw(tp, 1);
9228 tg3_full_unlock(tp);
/* init_hw failure path: release the INTx vector we just acquired. */
9231 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
/*
 * Load the firmware blob named by tp->fw_needed via request_firmware()
 * and sanity-check it.  The blob header is three big-endian words
 * (version, start address, full length including BSS); the declared
 * full length must be at least the payload size (blob size minus the
 * 12-byte header), otherwise the blob is rejected and released.
 */
9236 static int tg3_request_firmware(struct tg3 *tp)
9238 const __be32 *fw_data;
9240 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
9241 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
9246 fw_data = (void *)tp->fw->data;
9248 /* Firmware blob starts with version numbers, followed by
9249 * start address and _full_ length including BSS sections
9250 * (which must be longer than the actual data, of course
9253 tp->fw_len = be32_to_cpu(fw_data[2]); /* includes bss */
9254 if (tp->fw_len < (tp->fw->size - 12)) {
9255 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
9256 tp->fw_len, tp->fw_needed);
9257 release_firmware(tp->fw);
9262 /* We no longer need firmware; we have it. */
9263 tp->fw_needed = NULL;
/*
 * Try to enable MSI-X.  Requests min(online CPUs + 1, irq_max) vectors
 * (vector 0 handles link/misc interrupts, the rest drive RX queues),
 * retrying at the reduced count pci_enable_msix() offers.  On success
 * records the vectors, sizes the real RX/TX queue counts, and enables
 * RSS (and TSS on 5719/5720).  Returns true if MSI-X is in use.
 */
9267 static bool tg3_enable_msix(struct tg3 *tp)
9269 int i, rc, cpus = num_online_cpus();
/* NOTE: VLA sized by tp->irq_max -- small and bounded by the chip's
 * vector limit, but a VLA nonetheless. */
9270 struct msix_entry msix_ent[tp->irq_max];
9273 /* Just fallback to the simpler MSI mode. */
9277 * We want as many rx rings enabled as there are cpus.
9278 * The first MSIX vector only deals with link interrupts, etc,
9279 * so we add one to the number of vectors we are requesting.
9281 tp->irq_cnt = min_t(unsigned, cpus + 1, tp->irq_max);
9283 for (i = 0; i < tp->irq_max; i++) {
9284 msix_ent[i].entry = i;
9285 msix_ent[i].vector = 0;
/* pci_enable_msix() returns >0 with the number of vectors it could
 * grant; retry once at that count, else give up on MSI-X. */
9288 rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
9291 } else if (rc != 0) {
9292 if (pci_enable_msix(tp->pdev, msix_ent, rc))
9294 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
9299 for (i = 0; i < tp->irq_max; i++)
9300 tp->napi[i].irq_vec = msix_ent[i].vector;
/* One TX queue by default; RX queues = vectors minus the link vector. */
9302 netif_set_real_num_tx_queues(tp->dev, 1);
9303 rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1;
9304 if (netif_set_real_num_rx_queues(tp->dev, rc)) {
9305 pci_disable_msix(tp->pdev);
9309 if (tp->irq_cnt > 1) {
9310 tp->tg3_flags3 |= TG3_FLG3_ENABLE_RSS;
/* 5719/5720 also support multiple TX queues (TSS). */
9312 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
9313 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9314 tp->tg3_flags3 |= TG3_FLG3_ENABLE_TSS;
9315 netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
/*
 * Choose and initialize the interrupt mode for this device: MSI-X if
 * supported and grantable, else MSI, else legacy INTx.  Also programs
 * MSGINT_MODE and falls back to single-queue operation for non-MSI-X.
 */
9322 static void tg3_ints_init(struct tg3 *tp)
/* MSI requires tagged-status support; if the chip claims MSI without
 * it, refuse MSI (the flag manipulation is in elided lines). */
9324 if ((tp->tg3_flags & TG3_FLAG_SUPPORT_MSI_OR_MSIX) &&
9325 !(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
9326 /* All MSI supporting chips should support tagged
9327 * status. Assert that this is the case.
9329 netdev_warn(tp->dev,
9330 "MSI without TAGGED_STATUS? Not using MSI\n");
9334 if ((tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX) && tg3_enable_msix(tp))
9335 tp->tg3_flags2 |= TG3_FLG2_USING_MSIX;
9336 else if ((tp->tg3_flags & TG3_FLAG_SUPPORT_MSI) &&
9337 pci_enable_msi(tp->pdev) == 0)
9338 tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
/* Program the message-interrupt mode register; multi-vector enable is
 * only set for MSI-X. */
9340 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI_OR_MSIX) {
9341 u32 msi_mode = tr32(MSGINT_MODE);
9342 if ((tp->tg3_flags2 & TG3_FLG2_USING_MSIX) &&
9344 msi_mode |= MSGINT_MODE_MULTIVEC_EN;
9345 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
/* MSI or INTx: single vector, single RX/TX queue. */
9348 if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSIX)) {
9350 tp->napi[0].irq_vec = tp->pdev->irq;
9351 netif_set_real_num_tx_queues(tp->dev, 1);
9352 netif_set_real_num_rx_queues(tp->dev, 1);
/*
 * Tear down whatever interrupt mode tg3_ints_init() selected and clear
 * the MSI/MSI-X and RSS/TSS bookkeeping flags.
 */
9356 static void tg3_ints_fini(struct tg3 *tp)
9358 if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX)
9359 pci_disable_msix(tp->pdev);
9360 else if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
9361 pci_disable_msi(tp->pdev);
9362 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI_OR_MSIX;
9363 tp->tg3_flags3 &= ~(TG3_FLG3_ENABLE_RSS | TG3_FLG3_ENABLE_TSS);
/*
 * ndo_open: bring the interface up.  Sequence: (optionally) load
 * firmware, power up, set up interrupts, allocate DMA-consistent ring
 * memory, enable NAPI, request IRQs, init the hardware, start the
 * watchdog timer, validate MSI delivery, then enable interrupts and
 * wake the TX queues.  Error paths unwind in reverse order.
 */
9366 static int tg3_open(struct net_device *dev)
9368 struct tg3 *tp = netdev_priv(dev);
/* Firmware load failure on 5701 A0 is fatal is handled in elided
 * lines; on other chips TSO is disabled/restored as appropriate. */
9371 if (tp->fw_needed) {
9372 err = tg3_request_firmware(tp);
9373 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9377 netdev_warn(tp->dev, "TSO capability disabled\n");
9378 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
9379 } else if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
9380 netdev_notice(tp->dev, "TSO capability restored\n");
9381 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
9385 netif_carrier_off(tp->dev);
9387 err = tg3_power_up(tp);
9391 tg3_full_lock(tp, 0);
9393 tg3_disable_ints(tp);
9394 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
9396 tg3_full_unlock(tp);
9399 * Setup interrupts first so we know how
9400 * many NAPI resources to allocate
9404 /* The placement of this call is tied
9405 * to the setup and use of Host TX descriptors.
9407 err = tg3_alloc_consistent(tp);
9413 tg3_napi_enable(tp);
/* FIXME(review): the unwind loop below frees the SAME tnapi (the
 * vector whose request just failed) on every iteration instead of
 * re-deriving &tp->napi[i]; later upstream tg3 fixes this.  Left
 * untouched here because surrounding lines are elided. */
9415 for (i = 0; i < tp->irq_cnt; i++) {
9416 struct tg3_napi *tnapi = &tp->napi[i];
9417 err = tg3_request_irq(tp, i);
9419 for (i--; i >= 0; i--)
9420 free_irq(tnapi->irq_vec, tnapi);
9428 tg3_full_lock(tp, 0);
9430 err = tg3_init_hw(tp, 1);
9432 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
/* Watchdog cadence: 1 Hz with tagged status, 10 Hz otherwise (the
 * non-tagged race workaround needs finer-grained polling). */
9435 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
9436 tp->timer_offset = HZ;
9438 tp->timer_offset = HZ / 10;
9440 BUG_ON(tp->timer_offset > HZ);
9441 tp->timer_counter = tp->timer_multiplier =
9442 (HZ / tp->timer_offset);
/* ASF heartbeat every 2 seconds. */
9443 tp->asf_counter = tp->asf_multiplier =
9444 ((HZ / tp->timer_offset) * 2);
9446 init_timer(&tp->timer);
9447 tp->timer.expires = jiffies + tp->timer_offset;
9448 tp->timer.data = (unsigned long) tp;
9449 tp->timer.function = tg3_timer;
9452 tg3_full_unlock(tp);
9457 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
9458 err = tg3_test_msi(tp);
9461 tg3_full_lock(tp, 0);
9462 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9464 tg3_full_unlock(tp);
/* Pre-57765 MSI chips: enable one-shot MSI in the PCIe block. */
9469 if (!(tp->tg3_flags3 & TG3_FLG3_57765_PLUS) &&
9470 (tp->tg3_flags2 & TG3_FLG2_USING_MSI)) {
9471 u32 val = tr32(PCIE_TRANSACTION_CFG);
9473 tw32(PCIE_TRANSACTION_CFG,
9474 val | PCIE_TRANS_CFG_1SHOT_MSI);
9480 tg3_full_lock(tp, 0);
9482 add_timer(&tp->timer);
9483 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
9484 tg3_enable_ints(tp);
9486 tg3_full_unlock(tp);
9488 netif_tx_start_all_queues(dev);
/* Error unwind labels (elided): free IRQs in reverse, disable NAPI,
 * free consistent memory. */
9493 for (i = tp->irq_cnt - 1; i >= 0; i--) {
9494 struct tg3_napi *tnapi = &tp->napi[i];
9495 free_irq(tnapi->irq_vec, tnapi);
9499 tg3_napi_disable(tp);
9501 tg3_free_consistent(tp);
/* Forward declarations: both are defined later in this file but are
 * needed by tg3_close() to snapshot statistics before ring teardown. */
9508 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *,
9509 struct rtnl_link_stats64 *);
9510 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
/*
 * ndo_stop: bring the interface down.  Quiesces NAPI and the reset
 * task, stops TX, kills the watchdog timer, halts the chip under the
 * full lock, frees IRQs, snapshots the final hardware statistics into
 * the *_prev copies (so counters survive the down/up cycle), and
 * releases ring memory.
 */
9512 static int tg3_close(struct net_device *dev)
9515 struct tg3 *tp = netdev_priv(dev);
9517 tg3_napi_disable(tp);
9518 cancel_work_sync(&tp->reset_task);
9520 netif_tx_stop_all_queues(dev);
9522 del_timer_sync(&tp->timer);
9526 tg3_full_lock(tp, 1);
9528 tg3_disable_ints(tp);
9530 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9532 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
9534 tg3_full_unlock(tp);
9536 for (i = tp->irq_cnt - 1; i >= 0; i--) {
9537 struct tg3_napi *tnapi = &tp->napi[i];
9538 free_irq(tnapi->irq_vec, tnapi);
/* Preserve cumulative statistics across close/open. */
9543 tg3_get_stats64(tp->dev, &tp->net_stats_prev);
9545 memcpy(&tp->estats_prev, tg3_get_estats(tp),
9546 sizeof(tp->estats_prev));
9550 tg3_free_consistent(tp);
9554 netif_carrier_off(tp->dev);
/* Combine a split tg3_stat64_t (high/low 32-bit words) into one u64. */
9559 static inline u64 get_stat64(tg3_stat64_t *val)
9561 return ((u64)val->high << 32) | ((u64)val->low);
/*
 * Return the cumulative RX CRC error count.  On 5700/5701 copper the
 * MAC's FCS counter is not reliable, so the PHY's own CRC counter
 * (MII_TG3_TEST1 / MII_TG3_RXR_COUNTERS) is read under tp->lock and
 * accumulated into tp->phy_crc_errors; all other chips use the MAC
 * hardware statistic directly.
 */
9564 static u64 calc_crc_errors(struct tg3 *tp)
9566 struct tg3_hw_stats *hw_stats = tp->hw_stats;
9568 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9569 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9570 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
9573 spin_lock_bh(&tp->lock);
/* Enable the PHY CRC counter, then read (and implicitly clear) it. */
9574 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
9575 tg3_writephy(tp, MII_TG3_TEST1,
9576 val | MII_TG3_TEST1_CRC_EN);
9577 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
9580 spin_unlock_bh(&tp->lock);
9582 tp->phy_crc_errors += val;
9584 return tp->phy_crc_errors;
9587 return get_stat64(&hw_stats->rx_fcs_errors);
/* Compose one ethtool statistic: the pre-reset snapshot (old_estats)
 * plus the live 64-bit hardware counter.  Relies on estats/old_estats/
 * hw_stats being in scope with identically named members. */
9590 #define ESTAT_ADD(member) \
9591 estats->member = old_estats->member + \
9592 get_stat64(&hw_stats->member)
/*
 * Build the full ethtool statistics block in tp->estats: each member is
 * the saved pre-reset value (tp->estats_prev) plus the current hardware
 * counter from tp->hw_stats.  Returns a pointer to tp->estats.
 */
9594 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
9596 struct tg3_ethtool_stats *estats = &tp->estats;
9597 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
9598 struct tg3_hw_stats *hw_stats = tp->hw_stats;
/* Receive-path counters. */
9603 ESTAT_ADD(rx_octets);
9604 ESTAT_ADD(rx_fragments);
9605 ESTAT_ADD(rx_ucast_packets);
9606 ESTAT_ADD(rx_mcast_packets);
9607 ESTAT_ADD(rx_bcast_packets);
9608 ESTAT_ADD(rx_fcs_errors);
9609 ESTAT_ADD(rx_align_errors);
9610 ESTAT_ADD(rx_xon_pause_rcvd);
9611 ESTAT_ADD(rx_xoff_pause_rcvd);
9612 ESTAT_ADD(rx_mac_ctrl_rcvd);
9613 ESTAT_ADD(rx_xoff_entered);
9614 ESTAT_ADD(rx_frame_too_long_errors);
9615 ESTAT_ADD(rx_jabbers);
9616 ESTAT_ADD(rx_undersize_packets);
9617 ESTAT_ADD(rx_in_length_errors);
9618 ESTAT_ADD(rx_out_length_errors);
9619 ESTAT_ADD(rx_64_or_less_octet_packets);
9620 ESTAT_ADD(rx_65_to_127_octet_packets);
9621 ESTAT_ADD(rx_128_to_255_octet_packets);
9622 ESTAT_ADD(rx_256_to_511_octet_packets);
9623 ESTAT_ADD(rx_512_to_1023_octet_packets);
9624 ESTAT_ADD(rx_1024_to_1522_octet_packets);
9625 ESTAT_ADD(rx_1523_to_2047_octet_packets);
9626 ESTAT_ADD(rx_2048_to_4095_octet_packets);
9627 ESTAT_ADD(rx_4096_to_8191_octet_packets);
9628 ESTAT_ADD(rx_8192_to_9022_octet_packets);
/* Transmit-path counters. */
9630 ESTAT_ADD(tx_octets);
9631 ESTAT_ADD(tx_collisions);
9632 ESTAT_ADD(tx_xon_sent);
9633 ESTAT_ADD(tx_xoff_sent);
9634 ESTAT_ADD(tx_flow_control);
9635 ESTAT_ADD(tx_mac_errors);
9636 ESTAT_ADD(tx_single_collisions);
9637 ESTAT_ADD(tx_mult_collisions);
9638 ESTAT_ADD(tx_deferred);
9639 ESTAT_ADD(tx_excessive_collisions);
9640 ESTAT_ADD(tx_late_collisions);
9641 ESTAT_ADD(tx_collide_2times);
9642 ESTAT_ADD(tx_collide_3times);
9643 ESTAT_ADD(tx_collide_4times);
9644 ESTAT_ADD(tx_collide_5times);
9645 ESTAT_ADD(tx_collide_6times);
9646 ESTAT_ADD(tx_collide_7times);
9647 ESTAT_ADD(tx_collide_8times);
9648 ESTAT_ADD(tx_collide_9times);
9649 ESTAT_ADD(tx_collide_10times);
9650 ESTAT_ADD(tx_collide_11times);
9651 ESTAT_ADD(tx_collide_12times);
9652 ESTAT_ADD(tx_collide_13times);
9653 ESTAT_ADD(tx_collide_14times);
9654 ESTAT_ADD(tx_collide_15times);
9655 ESTAT_ADD(tx_ucast_packets);
9656 ESTAT_ADD(tx_mcast_packets);
9657 ESTAT_ADD(tx_bcast_packets);
9658 ESTAT_ADD(tx_carrier_sense_errors);
9659 ESTAT_ADD(tx_discards);
9660 ESTAT_ADD(tx_errors);
/* DMA and receive-list-placement internal counters. */
9662 ESTAT_ADD(dma_writeq_full);
9663 ESTAT_ADD(dma_write_prioq_full);
9664 ESTAT_ADD(rxbds_empty);
9665 ESTAT_ADD(rx_discards);
9666 ESTAT_ADD(rx_errors);
9667 ESTAT_ADD(rx_threshold_hit);
9669 ESTAT_ADD(dma_readq_full);
9670 ESTAT_ADD(dma_read_prioq_full);
9671 ESTAT_ADD(tx_comp_queue_full);
/* Host coalescing / interrupt counters. */
9673 ESTAT_ADD(ring_set_send_prod_index);
9674 ESTAT_ADD(ring_status_update);
9675 ESTAT_ADD(nic_irqs);
9676 ESTAT_ADD(nic_avoided_irqs);
9677 ESTAT_ADD(nic_tx_threshold_hit);
/*
 * ndo_get_stats64: translate the chip's hardware statistics block into
 * the generic rtnl_link_stats64 layout.  Each field is the pre-reset
 * snapshot (tp->net_stats_prev) plus the live hardware counters, so
 * values are cumulative across chip resets.
 */
9682 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
9683 struct rtnl_link_stats64 *stats)
9685 struct tg3 *tp = netdev_priv(dev);
9686 struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
9687 struct tg3_hw_stats *hw_stats = tp->hw_stats;
/* Packet counts are the sum of unicast/multicast/broadcast counters. */
9692 stats->rx_packets = old_stats->rx_packets +
9693 get_stat64(&hw_stats->rx_ucast_packets) +
9694 get_stat64(&hw_stats->rx_mcast_packets) +
9695 get_stat64(&hw_stats->rx_bcast_packets);
9697 stats->tx_packets = old_stats->tx_packets +
9698 get_stat64(&hw_stats->tx_ucast_packets) +
9699 get_stat64(&hw_stats->tx_mcast_packets) +
9700 get_stat64(&hw_stats->tx_bcast_packets);
9702 stats->rx_bytes = old_stats->rx_bytes +
9703 get_stat64(&hw_stats->rx_octets);
9704 stats->tx_bytes = old_stats->tx_bytes +
9705 get_stat64(&hw_stats->tx_octets);
9707 stats->rx_errors = old_stats->rx_errors +
9708 get_stat64(&hw_stats->rx_errors);
/* tx_errors aggregates every distinct TX failure counter. */
9709 stats->tx_errors = old_stats->tx_errors +
9710 get_stat64(&hw_stats->tx_errors) +
9711 get_stat64(&hw_stats->tx_mac_errors) +
9712 get_stat64(&hw_stats->tx_carrier_sense_errors) +
9713 get_stat64(&hw_stats->tx_discards);
9715 stats->multicast = old_stats->multicast +
9716 get_stat64(&hw_stats->rx_mcast_packets);
9717 stats->collisions = old_stats->collisions +
9718 get_stat64(&hw_stats->tx_collisions);
9720 stats->rx_length_errors = old_stats->rx_length_errors +
9721 get_stat64(&hw_stats->rx_frame_too_long_errors) +
9722 get_stat64(&hw_stats->rx_undersize_packets);
/* Empty RX buffer descriptors count as overruns. */
9724 stats->rx_over_errors = old_stats->rx_over_errors +
9725 get_stat64(&hw_stats->rxbds_empty);
9726 stats->rx_frame_errors = old_stats->rx_frame_errors +
9727 get_stat64(&hw_stats->rx_align_errors);
9728 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
9729 get_stat64(&hw_stats->tx_discards);
9730 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
9731 get_stat64(&hw_stats->tx_carrier_sense_errors);
/* CRC errors may come from the PHY on 5700/5701 -- see
 * calc_crc_errors(). */
9733 stats->rx_crc_errors = old_stats->rx_crc_errors +
9734 calc_crc_errors(tp);
9736 stats->rx_missed_errors = old_stats->rx_missed_errors +
9737 get_stat64(&hw_stats->rx_discards);
/* rx_dropped is maintained in software by the RX fast path. */
9739 stats->rx_dropped = tp->rx_dropped;
/* Bitwise CRC-32 over buf[0..len) used to index the multicast hash
 * filter.  NOTE(review): the polynomial/initializer lines are elided
 * from this excerpt -- presumably the standard Ethernet CRC-32; confirm
 * against the full source before relying on the exact value. */
9744 static inline u32 calc_crc(unsigned char *buf, int len)
9752 for (j = 0; j < len; j++) {
9755 for (k = 0; k < 8; k++) {
/* Program the 128-bit multicast hash filter to accept everything
 * (accept_all != 0 => all ones) or nothing (all zeros). */
9768 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9770 /* accept or reject all multicast frames */
9771 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9772 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9773 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9774 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
/*
 * Program the RX filtering mode from dev->flags and the multicast list.
 * Locked variant: the caller must hold the driver locks (see
 * tg3_set_rx_mode for the unlocked entry point).
 */
9777 static void __tg3_set_rx_mode(struct net_device *dev)
9779 struct tg3 *tp = netdev_priv(dev);
9782 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9783 RX_MODE_KEEP_VLAN_TAG);
/* Without in-kernel VLAN support the MAC may strip VLAN tags, except
 * when ASF firmware is active (it needs to see the tags). */
9785 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9786 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9789 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
9790 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9793 if (dev->flags & IFF_PROMISC) {
9794 /* Promiscuous mode. */
9795 rx_mode |= RX_MODE_PROMISC;
9796 } else if (dev->flags & IFF_ALLMULTI) {
9797 /* Accept all multicast. */
9798 tg3_set_multi(tp, 1);
9799 } else if (netdev_mc_empty(dev)) {
9800 /* Reject all multicast. */
9801 tg3_set_multi(tp, 0);
9803 /* Accept one or more multicast(s). */
9804 struct netdev_hw_addr *ha;
9805 u32 mc_filter[4] = { 0, };
/* Hash each address into one bit of the 128-bit filter: the CRC is
 * reduced to a bit index (reduction line elided here); bits 6:5 pick
 * the 32-bit word, the low bits pick the bit within it. */
9810 netdev_for_each_mc_addr(ha, dev) {
9811 crc = calc_crc(ha->addr, ETH_ALEN);
9813 regidx = (bit & 0x60) >> 5;
9815 mc_filter[regidx] |= (1 << bit);
9818 tw32(MAC_HASH_REG_0, mc_filter[0]);
9819 tw32(MAC_HASH_REG_1, mc_filter[1]);
9820 tw32(MAC_HASH_REG_2, mc_filter[2]);
9821 tw32(MAC_HASH_REG_3, mc_filter[3]);
/* Only touch the RX mode register when something changed. */
9824 if (rx_mode != tp->rx_mode) {
9825 tp->rx_mode = rx_mode;
9826 tw32_f(MAC_RX_MODE, rx_mode);
/* ndo_set_rx_mode: locked wrapper around __tg3_set_rx_mode(); no-op
 * while the interface is down. */
9831 static void tg3_set_rx_mode(struct net_device *dev)
9833 struct tg3 *tp = netdev_priv(dev);
9835 if (!netif_running(dev))
9838 tg3_full_lock(tp, 0);
9839 __tg3_set_rx_mode(dev);
9840 tg3_full_unlock(tp);
/* ethtool get_regs_len: size of the register dump buffer. */
9843 static int tg3_get_regs_len(struct net_device *dev)
9845 return TG3_REG_BLK_SIZE;
/* ethtool get_regs: dump the legacy register block into _p.  The
 * buffer is zeroed first; dumping is skipped (leaving zeros) while the
 * chip is in low-power state, since register reads would be invalid. */
9848 static void tg3_get_regs(struct net_device *dev,
9849 struct ethtool_regs *regs, void *_p)
9851 struct tg3 *tp = netdev_priv(dev);
9855 memset(_p, 0, TG3_REG_BLK_SIZE);
9857 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9860 tg3_full_lock(tp, 0);
9862 tg3_dump_legacy_regs(tp, (u32 *)_p);
9864 tg3_full_unlock(tp);
/* ethtool get_eeprom_len: size of the NVRAM as probed at init time. */
9867 static int tg3_get_eeprom_len(struct net_device *dev)
9869 struct tg3 *tp = netdev_priv(dev);
9871 return tp->nvram_size;
/*
 * ethtool get_eeprom: copy an arbitrary byte range out of NVRAM.
 * NVRAM is only readable in aligned 4-byte big-endian words, so the
 * transfer is done in three phases: a partial leading word, the aligned
 * middle, and a partial trailing word.  Fails while the chip is in
 * low-power state or has no NVRAM.
 */
9874 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
9876 struct tg3 *tp = netdev_priv(dev);
9879 u32 i, offset, len, b_offset, b_count;
9882 if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM)
9885 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9888 offset = eeprom->offset;
9892 eeprom->magic = TG3_EEPROM_MAGIC;
9895 /* adjustments to start on required 4 byte boundary */
9896 b_offset = offset & 3;
9897 b_count = 4 - b_offset;
9898 if (b_count > len) {
9899 /* i.e. offset=1 len=2 */
/* Read the containing aligned word and copy out just the requested
 * bytes. */
9902 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
9905 memcpy(data, ((char *)&val) + b_offset, b_count);
9908 eeprom->len += b_count;
9911 /* read bytes up to the last 4 byte boundary */
9912 pd = &data[eeprom->len];
9913 for (i = 0; i < (len - (len & 3)); i += 4) {
9914 ret = tg3_nvram_read_be32(tp, offset + i, &val);
9919 memcpy(pd + i, &val, 4);
9924 /* read last bytes not ending on 4 byte boundary */
9925 pd = &data[eeprom->len];
9927 b_offset = offset + len - b_count;
9928 ret = tg3_nvram_read_be32(tp, b_offset, &val);
9931 memcpy(pd, &val, b_count);
9932 eeprom->len += b_count;
/* Forward declaration: NVRAM block writer defined later in the file. */
9937 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
/*
 * ethtool set_eeprom: write an arbitrary byte range into NVRAM.
 * Because NVRAM is written in aligned 4-byte units, an unaligned head
 * or tail is handled read-modify-write: the bordering words are read
 * into `start`/`end`, a padded bounce buffer is built around the user
 * data, and the whole aligned span is written in one call.  Requires
 * the correct EEPROM magic and a chip that is not in low-power state.
 */
9939 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
9941 struct tg3 *tp = netdev_priv(dev);
9943 u32 offset, len, b_offset, odd_len;
9947 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9950 if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) ||
9951 eeprom->magic != TG3_EEPROM_MAGIC)
9954 offset = eeprom->offset;
/* Unaligned start: fetch the word containing the first bytes. */
9957 if ((b_offset = (offset & 3))) {
9958 /* adjustments to start on required 4 byte boundary */
9959 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
9970 /* adjustments to end on required 4 byte boundary */
9972 len = (len + 3) & ~3;
9973 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
/* Build the padded bounce buffer: preserved head word, user data,
 * preserved tail word. */
9979 if (b_offset || odd_len) {
9980 buf = kmalloc(len, GFP_KERNEL);
9984 memcpy(buf, &start, 4);
9986 memcpy(buf+len-4, &end, 4);
9987 memcpy(buf + b_offset, data, eeprom->len);
9990 ret = tg3_nvram_write_block(tp, offset, len, buf);
/*
 * ethtool get_settings: report supported/advertised modes, current
 * speed/duplex, port type and autoneg state.  When the PHY is managed
 * by phylib, the query is delegated to phy_ethtool_gset().
 */
9998 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10000 struct tg3 *tp = netdev_priv(dev);
10002 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
10003 struct phy_device *phydev;
10004 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10006 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10007 return phy_ethtool_gset(phydev, cmd);
10010 cmd->supported = (SUPPORTED_Autoneg);
/* Gigabit modes unless the PHY is 10/100-only. */
10012 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10013 cmd->supported |= (SUPPORTED_1000baseT_Half |
10014 SUPPORTED_1000baseT_Full);
/* Copper PHYs add the 10/100 modes and report PORT_TP; any SerDes
 * variant reports fibre instead. */
10016 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
10017 cmd->supported |= (SUPPORTED_100baseT_Half |
10018 SUPPORTED_100baseT_Full |
10019 SUPPORTED_10baseT_Half |
10020 SUPPORTED_10baseT_Full |
10022 cmd->port = PORT_TP;
10024 cmd->supported |= SUPPORTED_FIBRE;
10025 cmd->port = PORT_FIBRE;
10028 cmd->advertising = tp->link_config.advertising;
/* Live speed/duplex only makes sense while the interface is up. */
10029 if (netif_running(dev)) {
10030 cmd->speed = tp->link_config.active_speed;
10031 cmd->duplex = tp->link_config.active_duplex;
10033 cmd->speed = SPEED_INVALID;
10034 cmd->duplex = DUPLEX_INVALID;
10036 cmd->phy_address = tp->phy_addr;
10037 cmd->transceiver = XCVR_INTERNAL;
10038 cmd->autoneg = tp->link_config.autoneg;
/*
 * ethtool set_settings: validate and apply the requested link
 * configuration (autoneg state, advertised modes, forced speed/duplex),
 * then re-run PHY setup if the interface is up.  Delegates to
 * phy_ethtool_sset() when phylib manages the PHY.
 */
10044 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10046 struct tg3 *tp = netdev_priv(dev);
10048 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
10049 struct phy_device *phydev;
10050 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10052 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10053 return phy_ethtool_sset(phydev, cmd);
/* Reject anything but a well-formed enable/disable request. */
10056 if (cmd->autoneg != AUTONEG_ENABLE &&
10057 cmd->autoneg != AUTONEG_DISABLE)
10060 if (cmd->autoneg == AUTONEG_DISABLE &&
10061 cmd->duplex != DUPLEX_FULL &&
10062 cmd->duplex != DUPLEX_HALF)
/* Autoneg: build the mask of modes this hardware can advertise and
 * reject any request outside it. */
10065 if (cmd->autoneg == AUTONEG_ENABLE) {
10066 u32 mask = ADVERTISED_Autoneg |
10068 ADVERTISED_Asym_Pause;
10070 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10071 mask |= ADVERTISED_1000baseT_Half |
10072 ADVERTISED_1000baseT_Full;
10074 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
10075 mask |= ADVERTISED_100baseT_Half |
10076 ADVERTISED_100baseT_Full |
10077 ADVERTISED_10baseT_Half |
10078 ADVERTISED_10baseT_Full |
10081 mask |= ADVERTISED_FIBRE;
10083 if (cmd->advertising & ~mask)
/* Keep only the speed/duplex bits for the stored advertisement. */
10086 mask &= (ADVERTISED_1000baseT_Half |
10087 ADVERTISED_1000baseT_Full |
10088 ADVERTISED_100baseT_Half |
10089 ADVERTISED_100baseT_Full |
10090 ADVERTISED_10baseT_Half |
10091 ADVERTISED_10baseT_Full);
10093 cmd->advertising &= mask;
/* Forced mode: SerDes links only support 1000/full; copper allows
 * forced 10 or 100 (forced gigabit is rejected elsewhere). */
10095 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
10096 if (cmd->speed != SPEED_1000)
10099 if (cmd->duplex != DUPLEX_FULL)
10102 if (cmd->speed != SPEED_100 &&
10103 cmd->speed != SPEED_10)
10108 tg3_full_lock(tp, 0);
10110 tp->link_config.autoneg = cmd->autoneg;
10111 if (cmd->autoneg == AUTONEG_ENABLE) {
10112 tp->link_config.advertising = (cmd->advertising |
10113 ADVERTISED_Autoneg);
10114 tp->link_config.speed = SPEED_INVALID;
10115 tp->link_config.duplex = DUPLEX_INVALID;
10117 tp->link_config.advertising = 0;
10118 tp->link_config.speed = cmd->speed;
10119 tp->link_config.duplex = cmd->duplex;
/* Remember the requested configuration so it can be restored after a
 * low-power transition. */
10122 tp->link_config.orig_speed = tp->link_config.speed;
10123 tp->link_config.orig_duplex = tp->link_config.duplex;
10124 tp->link_config.orig_autoneg = tp->link_config.autoneg;
10126 if (netif_running(dev))
10127 tg3_setup_phy(tp, 1);
10129 tg3_full_unlock(tp);
/* ethtool get_drvinfo: report driver name/version, firmware version
 * string, and the PCI bus address. */
10134 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
10136 struct tg3 *tp = netdev_priv(dev);
10138 strcpy(info->driver, DRV_MODULE_NAME);
10139 strcpy(info->version, DRV_MODULE_VERSION);
10140 strcpy(info->fw_version, tp->fw_ver);
10141 strcpy(info->bus_info, pci_name(tp->pdev));
/* ethtool get_wol: magic-packet wake is the only supported method,
 * and only when both the NIC and the platform can wake the system. */
10144 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10146 struct tg3 *tp = netdev_priv(dev);
10148 if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
10149 device_can_wakeup(&tp->pdev->dev))
10150 wol->supported = WAKE_MAGIC;
10152 wol->supported = 0;
10154 if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
10155 device_can_wakeup(&tp->pdev->dev))
10156 wol->wolopts = WAKE_MAGIC;
10157 memset(&wol->sopass, 0, sizeof(wol->sopass));
10160 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10162 struct tg3 *tp = netdev_priv(dev);
10163 struct device *dp = &tp->pdev->dev;
10165 if (wol->wolopts & ~WAKE_MAGIC)
10167 if ((wol->wolopts & WAKE_MAGIC) &&
10168 !((tp->tg3_flags & TG3_FLAG_WOL_CAP) && device_can_wakeup(dp)))
10171 device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
10173 spin_lock_bh(&tp->lock);
10174 if (device_may_wakeup(dp))
10175 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
10177 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
10178 spin_unlock_bh(&tp->lock);
10183 static u32 tg3_get_msglevel(struct net_device *dev)
10185 struct tg3 *tp = netdev_priv(dev);
10186 return tp->msg_enable;
10189 static void tg3_set_msglevel(struct net_device *dev, u32 value)
10191 struct tg3 *tp = netdev_priv(dev);
10192 tp->msg_enable = value;
10195 static int tg3_nway_reset(struct net_device *dev)
10197 struct tg3 *tp = netdev_priv(dev);
10200 if (!netif_running(dev))
10203 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
10206 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
10207 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10209 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
10213 spin_lock_bh(&tp->lock);
10215 tg3_readphy(tp, MII_BMCR, &bmcr);
10216 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
10217 ((bmcr & BMCR_ANENABLE) ||
10218 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
10219 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
10223 spin_unlock_bh(&tp->lock);
10229 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10231 struct tg3 *tp = netdev_priv(dev);
10233 ering->rx_max_pending = tp->rx_std_ring_mask;
10234 ering->rx_mini_max_pending = 0;
10235 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
10236 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
10238 ering->rx_jumbo_max_pending = 0;
10240 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
10242 ering->rx_pending = tp->rx_pending;
10243 ering->rx_mini_pending = 0;
10244 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
10245 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
10247 ering->rx_jumbo_pending = 0;
10249 ering->tx_pending = tp->napi[0].tx_pending;
10252 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10254 struct tg3 *tp = netdev_priv(dev);
10255 int i, irq_sync = 0, err = 0;
10257 if ((ering->rx_pending > tp->rx_std_ring_mask) ||
10258 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
10259 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
10260 (ering->tx_pending <= MAX_SKB_FRAGS) ||
10261 ((tp->tg3_flags2 & TG3_FLG2_TSO_BUG) &&
10262 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
10265 if (netif_running(dev)) {
10267 tg3_netif_stop(tp);
10271 tg3_full_lock(tp, irq_sync);
10273 tp->rx_pending = ering->rx_pending;
10275 if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
10276 tp->rx_pending > 63)
10277 tp->rx_pending = 63;
10278 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
10280 for (i = 0; i < tp->irq_max; i++)
10281 tp->napi[i].tx_pending = ering->tx_pending;
10283 if (netif_running(dev)) {
10284 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10285 err = tg3_restart_hw(tp, 1);
10287 tg3_netif_start(tp);
10290 tg3_full_unlock(tp);
10292 if (irq_sync && !err)
10298 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10300 struct tg3 *tp = netdev_priv(dev);
10302 epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
10304 if (tp->link_config.active_flowctrl & FLOW_CTRL_RX)
10305 epause->rx_pause = 1;
10307 epause->rx_pause = 0;
10309 if (tp->link_config.active_flowctrl & FLOW_CTRL_TX)
10310 epause->tx_pause = 1;
10312 epause->tx_pause = 0;
10315 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10317 struct tg3 *tp = netdev_priv(dev);
10320 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
10322 struct phy_device *phydev;
10324 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10326 if (!(phydev->supported & SUPPORTED_Pause) ||
10327 (!(phydev->supported & SUPPORTED_Asym_Pause) &&
10328 (epause->rx_pause != epause->tx_pause)))
10331 tp->link_config.flowctrl = 0;
10332 if (epause->rx_pause) {
10333 tp->link_config.flowctrl |= FLOW_CTRL_RX;
10335 if (epause->tx_pause) {
10336 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10337 newadv = ADVERTISED_Pause;
10339 newadv = ADVERTISED_Pause |
10340 ADVERTISED_Asym_Pause;
10341 } else if (epause->tx_pause) {
10342 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10343 newadv = ADVERTISED_Asym_Pause;
10347 if (epause->autoneg)
10348 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
10350 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
10352 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
10353 u32 oldadv = phydev->advertising &
10354 (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
10355 if (oldadv != newadv) {
10356 phydev->advertising &=
10357 ~(ADVERTISED_Pause |
10358 ADVERTISED_Asym_Pause);
10359 phydev->advertising |= newadv;
10360 if (phydev->autoneg) {
10362 * Always renegotiate the link to
10363 * inform our link partner of our
10364 * flow control settings, even if the
10365 * flow control is forced. Let
10366 * tg3_adjust_link() do the final
10367 * flow control setup.
10369 return phy_start_aneg(phydev);
10373 if (!epause->autoneg)
10374 tg3_setup_flow_control(tp, 0, 0);
10376 tp->link_config.orig_advertising &=
10377 ~(ADVERTISED_Pause |
10378 ADVERTISED_Asym_Pause);
10379 tp->link_config.orig_advertising |= newadv;
10384 if (netif_running(dev)) {
10385 tg3_netif_stop(tp);
10389 tg3_full_lock(tp, irq_sync);
10391 if (epause->autoneg)
10392 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
10394 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
10395 if (epause->rx_pause)
10396 tp->link_config.flowctrl |= FLOW_CTRL_RX;
10398 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
10399 if (epause->tx_pause)
10400 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10402 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
10404 if (netif_running(dev)) {
10405 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10406 err = tg3_restart_hw(tp, 1);
10408 tg3_netif_start(tp);
10411 tg3_full_unlock(tp);
10417 static int tg3_get_sset_count(struct net_device *dev, int sset)
10421 return TG3_NUM_TEST;
10423 return TG3_NUM_STATS;
10425 return -EOPNOTSUPP;
10429 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10431 switch (stringset) {
10433 memcpy(buf, ðtool_stats_keys, sizeof(ethtool_stats_keys));
10436 memcpy(buf, ðtool_test_keys, sizeof(ethtool_test_keys));
10439 WARN_ON(1); /* we need a WARN() */
10444 static int tg3_set_phys_id(struct net_device *dev,
10445 enum ethtool_phys_id_state state)
10447 struct tg3 *tp = netdev_priv(dev);
10449 if (!netif_running(tp->dev))
10453 case ETHTOOL_ID_ACTIVE:
10454 return 1; /* cycle on/off once per second */
10456 case ETHTOOL_ID_ON:
10457 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10458 LED_CTRL_1000MBPS_ON |
10459 LED_CTRL_100MBPS_ON |
10460 LED_CTRL_10MBPS_ON |
10461 LED_CTRL_TRAFFIC_OVERRIDE |
10462 LED_CTRL_TRAFFIC_BLINK |
10463 LED_CTRL_TRAFFIC_LED);
10466 case ETHTOOL_ID_OFF:
10467 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10468 LED_CTRL_TRAFFIC_OVERRIDE);
10471 case ETHTOOL_ID_INACTIVE:
10472 tw32(MAC_LED_CTRL, tp->led_ctrl);
10479 static void tg3_get_ethtool_stats(struct net_device *dev,
10480 struct ethtool_stats *estats, u64 *tmp_stats)
10482 struct tg3 *tp = netdev_priv(dev);
10483 memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
10486 static __be32 * tg3_vpd_readblock(struct tg3 *tp)
10490 u32 offset = 0, len = 0;
10493 if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) ||
10494 tg3_nvram_read(tp, 0, &magic))
10497 if (magic == TG3_EEPROM_MAGIC) {
10498 for (offset = TG3_NVM_DIR_START;
10499 offset < TG3_NVM_DIR_END;
10500 offset += TG3_NVM_DIRENT_SIZE) {
10501 if (tg3_nvram_read(tp, offset, &val))
10504 if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
10505 TG3_NVM_DIRTYPE_EXTVPD)
10509 if (offset != TG3_NVM_DIR_END) {
10510 len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
10511 if (tg3_nvram_read(tp, offset + 4, &offset))
10514 offset = tg3_nvram_logical_addr(tp, offset);
10518 if (!offset || !len) {
10519 offset = TG3_NVM_VPD_OFF;
10520 len = TG3_NVM_VPD_LEN;
10523 buf = kmalloc(len, GFP_KERNEL);
10527 if (magic == TG3_EEPROM_MAGIC) {
10528 for (i = 0; i < len; i += 4) {
10529 /* The data is in little-endian format in NVRAM.
10530 * Use the big-endian read routines to preserve
10531 * the byte order as it exists in NVRAM.
10533 if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
10539 unsigned int pos = 0;
10541 ptr = (u8 *)&buf[0];
10542 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
10543 cnt = pci_read_vpd(tp->pdev, pos,
10545 if (cnt == -ETIMEDOUT || cnt == -EINTR)
10561 #define NVRAM_TEST_SIZE 0x100
10562 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
10563 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
10564 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
10565 #define NVRAM_SELFBOOT_HW_SIZE 0x20
10566 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
10568 static int tg3_test_nvram(struct tg3 *tp)
10572 int i, j, k, err = 0, size;
10574 if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM)
10577 if (tg3_nvram_read(tp, 0, &magic) != 0)
10580 if (magic == TG3_EEPROM_MAGIC)
10581 size = NVRAM_TEST_SIZE;
10582 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
10583 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
10584 TG3_EEPROM_SB_FORMAT_1) {
10585 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
10586 case TG3_EEPROM_SB_REVISION_0:
10587 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
10589 case TG3_EEPROM_SB_REVISION_2:
10590 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
10592 case TG3_EEPROM_SB_REVISION_3:
10593 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
10600 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
10601 size = NVRAM_SELFBOOT_HW_SIZE;
10605 buf = kmalloc(size, GFP_KERNEL);
10610 for (i = 0, j = 0; i < size; i += 4, j++) {
10611 err = tg3_nvram_read_be32(tp, i, &buf[j]);
10618 /* Selfboot format */
10619 magic = be32_to_cpu(buf[0]);
10620 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
10621 TG3_EEPROM_MAGIC_FW) {
10622 u8 *buf8 = (u8 *) buf, csum8 = 0;
10624 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
10625 TG3_EEPROM_SB_REVISION_2) {
10626 /* For rev 2, the csum doesn't include the MBA. */
10627 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
10629 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
10632 for (i = 0; i < size; i++)
10645 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
10646 TG3_EEPROM_MAGIC_HW) {
10647 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
10648 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
10649 u8 *buf8 = (u8 *) buf;
10651 /* Separate the parity bits and the data bytes. */
10652 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
10653 if ((i == 0) || (i == 8)) {
10657 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
10658 parity[k++] = buf8[i] & msk;
10660 } else if (i == 16) {
10664 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
10665 parity[k++] = buf8[i] & msk;
10668 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
10669 parity[k++] = buf8[i] & msk;
10672 data[j++] = buf8[i];
10676 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
10677 u8 hw8 = hweight8(data[i]);
10679 if ((hw8 & 0x1) && parity[i])
10681 else if (!(hw8 & 0x1) && !parity[i])
10690 /* Bootstrap checksum at offset 0x10 */
10691 csum = calc_crc((unsigned char *) buf, 0x10);
10692 if (csum != le32_to_cpu(buf[0x10/4]))
10695 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
10696 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
10697 if (csum != le32_to_cpu(buf[0xfc/4]))
10702 buf = tg3_vpd_readblock(tp);
10706 i = pci_vpd_find_tag((u8 *)buf, 0, TG3_NVM_VPD_LEN,
10707 PCI_VPD_LRDT_RO_DATA);
10709 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
10713 if (i + PCI_VPD_LRDT_TAG_SIZE + j > TG3_NVM_VPD_LEN)
10716 i += PCI_VPD_LRDT_TAG_SIZE;
10717 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
10718 PCI_VPD_RO_KEYWORD_CHKSUM);
10722 j += PCI_VPD_INFO_FLD_HDR_SIZE;
10724 for (i = 0; i <= j; i++)
10725 csum8 += ((u8 *)buf)[i];
10739 #define TG3_SERDES_TIMEOUT_SEC 2
10740 #define TG3_COPPER_TIMEOUT_SEC 6
10742 static int tg3_test_link(struct tg3 *tp)
10746 if (!netif_running(tp->dev))
10749 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
10750 max = TG3_SERDES_TIMEOUT_SEC;
10752 max = TG3_COPPER_TIMEOUT_SEC;
10754 for (i = 0; i < max; i++) {
10755 if (netif_carrier_ok(tp->dev))
10758 if (msleep_interruptible(1000))
10765 /* Only test the commonly used registers */
10766 static int tg3_test_registers(struct tg3 *tp)
10768 int i, is_5705, is_5750;
10769 u32 offset, read_mask, write_mask, val, save_val, read_val;
10773 #define TG3_FL_5705 0x1
10774 #define TG3_FL_NOT_5705 0x2
10775 #define TG3_FL_NOT_5788 0x4
10776 #define TG3_FL_NOT_5750 0x8
10780 /* MAC Control Registers */
10781 { MAC_MODE, TG3_FL_NOT_5705,
10782 0x00000000, 0x00ef6f8c },
10783 { MAC_MODE, TG3_FL_5705,
10784 0x00000000, 0x01ef6b8c },
10785 { MAC_STATUS, TG3_FL_NOT_5705,
10786 0x03800107, 0x00000000 },
10787 { MAC_STATUS, TG3_FL_5705,
10788 0x03800100, 0x00000000 },
10789 { MAC_ADDR_0_HIGH, 0x0000,
10790 0x00000000, 0x0000ffff },
10791 { MAC_ADDR_0_LOW, 0x0000,
10792 0x00000000, 0xffffffff },
10793 { MAC_RX_MTU_SIZE, 0x0000,
10794 0x00000000, 0x0000ffff },
10795 { MAC_TX_MODE, 0x0000,
10796 0x00000000, 0x00000070 },
10797 { MAC_TX_LENGTHS, 0x0000,
10798 0x00000000, 0x00003fff },
10799 { MAC_RX_MODE, TG3_FL_NOT_5705,
10800 0x00000000, 0x000007fc },
10801 { MAC_RX_MODE, TG3_FL_5705,
10802 0x00000000, 0x000007dc },
10803 { MAC_HASH_REG_0, 0x0000,
10804 0x00000000, 0xffffffff },
10805 { MAC_HASH_REG_1, 0x0000,
10806 0x00000000, 0xffffffff },
10807 { MAC_HASH_REG_2, 0x0000,
10808 0x00000000, 0xffffffff },
10809 { MAC_HASH_REG_3, 0x0000,
10810 0x00000000, 0xffffffff },
10812 /* Receive Data and Receive BD Initiator Control Registers. */
10813 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
10814 0x00000000, 0xffffffff },
10815 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
10816 0x00000000, 0xffffffff },
10817 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
10818 0x00000000, 0x00000003 },
10819 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
10820 0x00000000, 0xffffffff },
10821 { RCVDBDI_STD_BD+0, 0x0000,
10822 0x00000000, 0xffffffff },
10823 { RCVDBDI_STD_BD+4, 0x0000,
10824 0x00000000, 0xffffffff },
10825 { RCVDBDI_STD_BD+8, 0x0000,
10826 0x00000000, 0xffff0002 },
10827 { RCVDBDI_STD_BD+0xc, 0x0000,
10828 0x00000000, 0xffffffff },
10830 /* Receive BD Initiator Control Registers. */
10831 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
10832 0x00000000, 0xffffffff },
10833 { RCVBDI_STD_THRESH, TG3_FL_5705,
10834 0x00000000, 0x000003ff },
10835 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
10836 0x00000000, 0xffffffff },
10838 /* Host Coalescing Control Registers. */
10839 { HOSTCC_MODE, TG3_FL_NOT_5705,
10840 0x00000000, 0x00000004 },
10841 { HOSTCC_MODE, TG3_FL_5705,
10842 0x00000000, 0x000000f6 },
10843 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
10844 0x00000000, 0xffffffff },
10845 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
10846 0x00000000, 0x000003ff },
10847 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
10848 0x00000000, 0xffffffff },
10849 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
10850 0x00000000, 0x000003ff },
10851 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
10852 0x00000000, 0xffffffff },
10853 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
10854 0x00000000, 0x000000ff },
10855 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
10856 0x00000000, 0xffffffff },
10857 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
10858 0x00000000, 0x000000ff },
10859 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
10860 0x00000000, 0xffffffff },
10861 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
10862 0x00000000, 0xffffffff },
10863 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
10864 0x00000000, 0xffffffff },
10865 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
10866 0x00000000, 0x000000ff },
10867 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
10868 0x00000000, 0xffffffff },
10869 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
10870 0x00000000, 0x000000ff },
10871 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
10872 0x00000000, 0xffffffff },
10873 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
10874 0x00000000, 0xffffffff },
10875 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
10876 0x00000000, 0xffffffff },
10877 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
10878 0x00000000, 0xffffffff },
10879 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
10880 0x00000000, 0xffffffff },
10881 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
10882 0xffffffff, 0x00000000 },
10883 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
10884 0xffffffff, 0x00000000 },
10886 /* Buffer Manager Control Registers. */
10887 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
10888 0x00000000, 0x007fff80 },
10889 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
10890 0x00000000, 0x007fffff },
10891 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
10892 0x00000000, 0x0000003f },
10893 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
10894 0x00000000, 0x000001ff },
10895 { BUFMGR_MB_HIGH_WATER, 0x0000,
10896 0x00000000, 0x000001ff },
10897 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
10898 0xffffffff, 0x00000000 },
10899 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
10900 0xffffffff, 0x00000000 },
10902 /* Mailbox Registers */
10903 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
10904 0x00000000, 0x000001ff },
10905 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
10906 0x00000000, 0x000001ff },
10907 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
10908 0x00000000, 0x000007ff },
10909 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
10910 0x00000000, 0x000001ff },
10912 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
10915 is_5705 = is_5750 = 0;
10916 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
10918 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
10922 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
10923 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
10926 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
10929 if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
10930 (reg_tbl[i].flags & TG3_FL_NOT_5788))
10933 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
10936 offset = (u32) reg_tbl[i].offset;
10937 read_mask = reg_tbl[i].read_mask;
10938 write_mask = reg_tbl[i].write_mask;
10940 /* Save the original register content */
10941 save_val = tr32(offset);
10943 /* Determine the read-only value. */
10944 read_val = save_val & read_mask;
10946 /* Write zero to the register, then make sure the read-only bits
10947 * are not changed and the read/write bits are all zeros.
10951 val = tr32(offset);
10953 /* Test the read-only and read/write bits. */
10954 if (((val & read_mask) != read_val) || (val & write_mask))
10957 /* Write ones to all the bits defined by RdMask and WrMask, then
10958 * make sure the read-only bits are not changed and the
10959 * read/write bits are all ones.
10961 tw32(offset, read_mask | write_mask);
10963 val = tr32(offset);
10965 /* Test the read-only bits. */
10966 if ((val & read_mask) != read_val)
10969 /* Test the read/write bits. */
10970 if ((val & write_mask) != write_mask)
10973 tw32(offset, save_val);
10979 if (netif_msg_hw(tp))
10980 netdev_err(tp->dev,
10981 "Register test failed at offset %x\n", offset);
10982 tw32(offset, save_val);
10986 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
10988 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
10992 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
10993 for (j = 0; j < len; j += 4) {
10996 tg3_write_mem(tp, offset + j, test_pattern[i]);
10997 tg3_read_mem(tp, offset + j, &val);
10998 if (val != test_pattern[i])
11005 static int tg3_test_memory(struct tg3 *tp)
11007 static struct mem_entry {
11010 } mem_tbl_570x[] = {
11011 { 0x00000000, 0x00b50},
11012 { 0x00002000, 0x1c000},
11013 { 0xffffffff, 0x00000}
11014 }, mem_tbl_5705[] = {
11015 { 0x00000100, 0x0000c},
11016 { 0x00000200, 0x00008},
11017 { 0x00004000, 0x00800},
11018 { 0x00006000, 0x01000},
11019 { 0x00008000, 0x02000},
11020 { 0x00010000, 0x0e000},
11021 { 0xffffffff, 0x00000}
11022 }, mem_tbl_5755[] = {
11023 { 0x00000200, 0x00008},
11024 { 0x00004000, 0x00800},
11025 { 0x00006000, 0x00800},
11026 { 0x00008000, 0x02000},
11027 { 0x00010000, 0x0c000},
11028 { 0xffffffff, 0x00000}
11029 }, mem_tbl_5906[] = {
11030 { 0x00000200, 0x00008},
11031 { 0x00004000, 0x00400},
11032 { 0x00006000, 0x00400},
11033 { 0x00008000, 0x01000},
11034 { 0x00010000, 0x01000},
11035 { 0xffffffff, 0x00000}
11036 }, mem_tbl_5717[] = {
11037 { 0x00000200, 0x00008},
11038 { 0x00010000, 0x0a000},
11039 { 0x00020000, 0x13c00},
11040 { 0xffffffff, 0x00000}
11041 }, mem_tbl_57765[] = {
11042 { 0x00000200, 0x00008},
11043 { 0x00004000, 0x00800},
11044 { 0x00006000, 0x09800},
11045 { 0x00010000, 0x0a000},
11046 { 0xffffffff, 0x00000}
11048 struct mem_entry *mem_tbl;
11052 if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)
11053 mem_tbl = mem_tbl_5717;
11054 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
11055 mem_tbl = mem_tbl_57765;
11056 else if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
11057 mem_tbl = mem_tbl_5755;
11058 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11059 mem_tbl = mem_tbl_5906;
11060 else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
11061 mem_tbl = mem_tbl_5705;
11063 mem_tbl = mem_tbl_570x;
11065 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
11066 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
11074 #define TG3_MAC_LOOPBACK 0
11075 #define TG3_PHY_LOOPBACK 1
11076 #define TG3_TSO_LOOPBACK 2
11078 #define TG3_TSO_MSS 500
11080 #define TG3_TSO_IP_HDR_LEN 20
11081 #define TG3_TSO_TCP_HDR_LEN 20
11082 #define TG3_TSO_TCP_OPT_LEN 12
11084 static const u8 tg3_tso_header[] = {
11086 0x45, 0x00, 0x00, 0x00,
11087 0x00, 0x00, 0x40, 0x00,
11088 0x40, 0x06, 0x00, 0x00,
11089 0x0a, 0x00, 0x00, 0x01,
11090 0x0a, 0x00, 0x00, 0x02,
11091 0x0d, 0x00, 0xe0, 0x00,
11092 0x00, 0x00, 0x01, 0x00,
11093 0x00, 0x00, 0x02, 0x00,
11094 0x80, 0x10, 0x10, 0x00,
11095 0x14, 0x09, 0x00, 0x00,
11096 0x01, 0x01, 0x08, 0x0a,
11097 0x11, 0x11, 0x11, 0x11,
11098 0x11, 0x11, 0x11, 0x11,
11101 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, int loopback_mode)
11103 u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
11104 u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
11105 struct sk_buff *skb, *rx_skb;
11108 int num_pkts, tx_len, rx_len, i, err;
11109 struct tg3_rx_buffer_desc *desc;
11110 struct tg3_napi *tnapi, *rnapi;
11111 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
11113 tnapi = &tp->napi[0];
11114 rnapi = &tp->napi[0];
11115 if (tp->irq_cnt > 1) {
11116 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS)
11117 rnapi = &tp->napi[1];
11118 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
11119 tnapi = &tp->napi[1];
11121 coal_now = tnapi->coal_now | rnapi->coal_now;
11123 if (loopback_mode == TG3_MAC_LOOPBACK) {
11124 /* HW errata - mac loopback fails in some cases on 5780.
11125 * Normal traffic and PHY loopback are not affected by
11126 * errata. Also, the MAC loopback test is deprecated for
11127 * all newer ASIC revisions.
11129 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
11130 (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT))
11133 mac_mode = tp->mac_mode &
11134 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
11135 mac_mode |= MAC_MODE_PORT_INT_LPBACK;
11136 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
11137 mac_mode |= MAC_MODE_LINK_POLARITY;
11138 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
11139 mac_mode |= MAC_MODE_PORT_MODE_MII;
11141 mac_mode |= MAC_MODE_PORT_MODE_GMII;
11142 tw32(MAC_MODE, mac_mode);
11144 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
11145 tg3_phy_fet_toggle_apd(tp, false);
11146 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
11148 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
11150 tg3_phy_toggle_automdix(tp, 0);
11152 tg3_writephy(tp, MII_BMCR, val);
11155 mac_mode = tp->mac_mode &
11156 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
11157 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
11158 tg3_writephy(tp, MII_TG3_FET_PTEST,
11159 MII_TG3_FET_PTEST_FRC_TX_LINK |
11160 MII_TG3_FET_PTEST_FRC_TX_LOCK);
11161 /* The write needs to be flushed for the AC131 */
11162 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
11163 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
11164 mac_mode |= MAC_MODE_PORT_MODE_MII;
11166 mac_mode |= MAC_MODE_PORT_MODE_GMII;
11168 /* reset to prevent losing 1st rx packet intermittently */
11169 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
11170 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
11172 tw32_f(MAC_RX_MODE, tp->rx_mode);
11174 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
11175 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
11176 if (masked_phy_id == TG3_PHY_ID_BCM5401)
11177 mac_mode &= ~MAC_MODE_LINK_POLARITY;
11178 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
11179 mac_mode |= MAC_MODE_LINK_POLARITY;
11180 tg3_writephy(tp, MII_TG3_EXT_CTRL,
11181 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
11183 tw32(MAC_MODE, mac_mode);
11185 /* Wait for link */
11186 for (i = 0; i < 100; i++) {
11187 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
11196 skb = netdev_alloc_skb(tp->dev, tx_len);
11200 tx_data = skb_put(skb, tx_len);
11201 memcpy(tx_data, tp->dev->dev_addr, 6);
11202 memset(tx_data + 6, 0x0, 8);
11204 tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
11206 if (loopback_mode == TG3_TSO_LOOPBACK) {
11207 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
11209 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
11210 TG3_TSO_TCP_OPT_LEN;
11212 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
11213 sizeof(tg3_tso_header));
11216 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
11217 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
11219 /* Set the total length field in the IP header */
11220 iph->tot_len = htons((u16)(mss + hdr_len));
11222 base_flags = (TXD_FLAG_CPU_PRE_DMA |
11223 TXD_FLAG_CPU_POST_DMA);
11225 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
11227 val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
11228 th = (struct tcphdr *)&tx_data[val];
11231 base_flags |= TXD_FLAG_TCPUDP_CSUM;
11233 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) {
11234 mss |= (hdr_len & 0xc) << 12;
11235 if (hdr_len & 0x10)
11236 base_flags |= 0x00000010;
11237 base_flags |= (hdr_len & 0x3e0) << 5;
11238 } else if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2)
11239 mss |= hdr_len << 9;
11240 else if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_1) ||
11241 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
11242 mss |= (TG3_TSO_TCP_OPT_LEN << 9);
11244 base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
11247 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
11250 data_off = ETH_HLEN;
11253 for (i = data_off; i < tx_len; i++)
11254 tx_data[i] = (u8) (i & 0xff);
11256 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
11257 if (pci_dma_mapping_error(tp->pdev, map)) {
11258 dev_kfree_skb(skb);
11262 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11267 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
11269 tg3_set_txd(tnapi, tnapi->tx_prod, map, tx_len,
11270 base_flags, (mss << 1) | 1);
11274 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
11275 tr32_mailbox(tnapi->prodmbox);
11279 /* 350 usec to allow enough time on some 10/100 Mbps devices. */
11280 for (i = 0; i < 35; i++) {
11281 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11286 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
11287 rx_idx = rnapi->hw_status->idx[0].rx_producer;
11288 if ((tx_idx == tnapi->tx_prod) &&
11289 (rx_idx == (rx_start_idx + num_pkts)))
11293 pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
11294 dev_kfree_skb(skb);
11296 if (tx_idx != tnapi->tx_prod)
11299 if (rx_idx != rx_start_idx + num_pkts)
11303 while (rx_idx != rx_start_idx) {
11304 desc = &rnapi->rx_rcb[rx_start_idx++];
11305 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
11306 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
11308 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
11309 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
11312 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
11315 if (loopback_mode != TG3_TSO_LOOPBACK) {
11316 if (rx_len != tx_len)
11319 if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
11320 if (opaque_key != RXD_OPAQUE_RING_STD)
11323 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
11326 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
11327 (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
11328 >> RXD_TCPCSUM_SHIFT == 0xffff) {
11332 if (opaque_key == RXD_OPAQUE_RING_STD) {
11333 rx_skb = tpr->rx_std_buffers[desc_idx].skb;
11334 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
11336 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
11337 rx_skb = tpr->rx_jmb_buffers[desc_idx].skb;
11338 map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
11343 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
11344 PCI_DMA_FROMDEVICE);
11346 for (i = data_off; i < rx_len; i++, val++) {
11347 if (*(rx_skb->data + i) != (u8) (val & 0xff))
11354 /* tg3_free_rings will unmap and free the rx_skb */
11359 #define TG3_STD_LOOPBACK_FAILED 1
11360 #define TG3_JMB_LOOPBACK_FAILED 2
11361 #define TG3_TSO_LOOPBACK_FAILED 4
11363 #define TG3_MAC_LOOPBACK_SHIFT 0
11364 #define TG3_PHY_LOOPBACK_SHIFT 4
11365 #define TG3_LOOPBACK_FAILED 0x00000077
11367 static int tg3_test_loopback(struct tg3 *tp)
11370 u32 eee_cap, cpmuctrl = 0;
11372 if (!netif_running(tp->dev))
11373 return TG3_LOOPBACK_FAILED;
11375 eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
11376 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11378 err = tg3_reset_hw(tp, 1);
11380 err = TG3_LOOPBACK_FAILED;
11384 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS) {
11387 /* Reroute all rx packets to the 1st queue */
11388 for (i = MAC_RSS_INDIR_TBL_0;
11389 i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
11393 /* Turn off gphy autopowerdown. */
11394 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
11395 tg3_phy_toggle_apd(tp, false);
11397 if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) {
11401 tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);
11403 /* Wait for up to 40 microseconds to acquire lock. */
11404 for (i = 0; i < 4; i++) {
11405 status = tr32(TG3_CPMU_MUTEX_GNT);
11406 if (status == CPMU_MUTEX_GNT_DRIVER)
11411 if (status != CPMU_MUTEX_GNT_DRIVER) {
11412 err = TG3_LOOPBACK_FAILED;
11416 /* Turn off link-based power management. */
11417 cpmuctrl = tr32(TG3_CPMU_CTRL);
11418 tw32(TG3_CPMU_CTRL,
11419 cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
11420 CPMU_CTRL_LINK_AWARE_MODE));
11423 if (tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_MAC_LOOPBACK))
11424 err |= TG3_STD_LOOPBACK_FAILED << TG3_MAC_LOOPBACK_SHIFT;
11426 if ((tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) &&
11427 tg3_run_loopback(tp, 9000 + ETH_HLEN, TG3_MAC_LOOPBACK))
11428 err |= TG3_JMB_LOOPBACK_FAILED << TG3_MAC_LOOPBACK_SHIFT;
11430 if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) {
11431 tw32(TG3_CPMU_CTRL, cpmuctrl);
11433 /* Release the mutex */
11434 tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
11437 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11438 !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
11439 if (tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_PHY_LOOPBACK))
11440 err |= TG3_STD_LOOPBACK_FAILED <<
11441 TG3_PHY_LOOPBACK_SHIFT;
11442 if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
11443 tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_TSO_LOOPBACK))
11444 err |= TG3_TSO_LOOPBACK_FAILED <<
11445 TG3_PHY_LOOPBACK_SHIFT;
11446 if ((tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) &&
11447 tg3_run_loopback(tp, 9000 + ETH_HLEN, TG3_PHY_LOOPBACK))
11448 err |= TG3_JMB_LOOPBACK_FAILED <<
11449 TG3_PHY_LOOPBACK_SHIFT;
11452 /* Re-enable gphy autopowerdown. */
11453 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
11454 tg3_phy_toggle_apd(tp, true);
11457 tp->phy_flags |= eee_cap;
11462 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
11465 struct tg3 *tp = netdev_priv(dev);
11467 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11470 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
11472 if (tg3_test_nvram(tp) != 0) {
11473 etest->flags |= ETH_TEST_FL_FAILED;
11476 if (tg3_test_link(tp) != 0) {
11477 etest->flags |= ETH_TEST_FL_FAILED;
11480 if (etest->flags & ETH_TEST_FL_OFFLINE) {
11481 int err, err2 = 0, irq_sync = 0;
11483 if (netif_running(dev)) {
11485 tg3_netif_stop(tp);
11489 tg3_full_lock(tp, irq_sync);
11491 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
11492 err = tg3_nvram_lock(tp);
11493 tg3_halt_cpu(tp, RX_CPU_BASE);
11494 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
11495 tg3_halt_cpu(tp, TX_CPU_BASE);
11497 tg3_nvram_unlock(tp);
11499 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
11502 if (tg3_test_registers(tp) != 0) {
11503 etest->flags |= ETH_TEST_FL_FAILED;
11506 if (tg3_test_memory(tp) != 0) {
11507 etest->flags |= ETH_TEST_FL_FAILED;
11510 if ((data[4] = tg3_test_loopback(tp)) != 0)
11511 etest->flags |= ETH_TEST_FL_FAILED;
11513 tg3_full_unlock(tp);
11515 if (tg3_test_interrupt(tp) != 0) {
11516 etest->flags |= ETH_TEST_FL_FAILED;
11520 tg3_full_lock(tp, 0);
11522 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11523 if (netif_running(dev)) {
11524 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
11525 err2 = tg3_restart_hw(tp, 1);
11527 tg3_netif_start(tp);
11530 tg3_full_unlock(tp);
11532 if (irq_sync && !err2)
11535 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11536 tg3_power_down(tp);
11540 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11542 struct mii_ioctl_data *data = if_mii(ifr);
11543 struct tg3 *tp = netdev_priv(dev);
11546 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
11547 struct phy_device *phydev;
11548 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11550 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11551 return phy_mii_ioctl(phydev, ifr, cmd);
11556 data->phy_id = tp->phy_addr;
11559 case SIOCGMIIREG: {
11562 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11563 break; /* We have no PHY */
11565 if (!netif_running(dev))
11568 spin_lock_bh(&tp->lock);
11569 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
11570 spin_unlock_bh(&tp->lock);
11572 data->val_out = mii_regval;
11578 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11579 break; /* We have no PHY */
11581 if (!netif_running(dev))
11584 spin_lock_bh(&tp->lock);
11585 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
11586 spin_unlock_bh(&tp->lock);
11594 return -EOPNOTSUPP;
11597 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11599 struct tg3 *tp = netdev_priv(dev);
11601 memcpy(ec, &tp->coal, sizeof(*ec));
11605 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11607 struct tg3 *tp = netdev_priv(dev);
11608 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
11609 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
11611 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
11612 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
11613 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
11614 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
11615 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
11618 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
11619 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
11620 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
11621 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
11622 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
11623 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
11624 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
11625 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
11626 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
11627 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
11630 /* No rx interrupts will be generated if both are zero */
11631 if ((ec->rx_coalesce_usecs == 0) &&
11632 (ec->rx_max_coalesced_frames == 0))
11635 /* No tx interrupts will be generated if both are zero */
11636 if ((ec->tx_coalesce_usecs == 0) &&
11637 (ec->tx_max_coalesced_frames == 0))
11640 /* Only copy relevant parameters, ignore all others. */
11641 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
11642 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
11643 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
11644 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
11645 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
11646 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
11647 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
11648 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
11649 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
11651 if (netif_running(dev)) {
11652 tg3_full_lock(tp, 0);
11653 __tg3_set_coalesce(tp, &tp->coal);
11654 tg3_full_unlock(tp);
11659 static const struct ethtool_ops tg3_ethtool_ops = {
11660 .get_settings = tg3_get_settings,
11661 .set_settings = tg3_set_settings,
11662 .get_drvinfo = tg3_get_drvinfo,
11663 .get_regs_len = tg3_get_regs_len,
11664 .get_regs = tg3_get_regs,
11665 .get_wol = tg3_get_wol,
11666 .set_wol = tg3_set_wol,
11667 .get_msglevel = tg3_get_msglevel,
11668 .set_msglevel = tg3_set_msglevel,
11669 .nway_reset = tg3_nway_reset,
11670 .get_link = ethtool_op_get_link,
11671 .get_eeprom_len = tg3_get_eeprom_len,
11672 .get_eeprom = tg3_get_eeprom,
11673 .set_eeprom = tg3_set_eeprom,
11674 .get_ringparam = tg3_get_ringparam,
11675 .set_ringparam = tg3_set_ringparam,
11676 .get_pauseparam = tg3_get_pauseparam,
11677 .set_pauseparam = tg3_set_pauseparam,
11678 .self_test = tg3_self_test,
11679 .get_strings = tg3_get_strings,
11680 .set_phys_id = tg3_set_phys_id,
11681 .get_ethtool_stats = tg3_get_ethtool_stats,
11682 .get_coalesce = tg3_get_coalesce,
11683 .set_coalesce = tg3_set_coalesce,
11684 .get_sset_count = tg3_get_sset_count,
11687 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
11689 u32 cursize, val, magic;
11691 tp->nvram_size = EEPROM_CHIP_SIZE;
11693 if (tg3_nvram_read(tp, 0, &magic) != 0)
11696 if ((magic != TG3_EEPROM_MAGIC) &&
11697 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
11698 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
11702 * Size the chip by reading offsets at increasing powers of two.
11703 * When we encounter our validation signature, we know the addressing
11704 * has wrapped around, and thus have our chip size.
11708 while (cursize < tp->nvram_size) {
11709 if (tg3_nvram_read(tp, cursize, &val) != 0)
11718 tp->nvram_size = cursize;
11721 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
11725 if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) ||
11726 tg3_nvram_read(tp, 0, &val) != 0)
11729 /* Selfboot format */
11730 if (val != TG3_EEPROM_MAGIC) {
11731 tg3_get_eeprom_size(tp);
11735 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
11737 /* This is confusing. We want to operate on the
11738 * 16-bit value at offset 0xf2. The tg3_nvram_read()
11739 * call will read from NVRAM and byteswap the data
11740 * according to the byteswapping settings for all
11741 * other register accesses. This ensures the data we
11742 * want will always reside in the lower 16-bits.
11743 * However, the data in NVRAM is in LE format, which
11744 * means the data from the NVRAM read will always be
11745 * opposite the endianness of the CPU. The 16-bit
11746 * byteswap then brings the data to CPU endianness.
11748 tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
11752 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11755 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
11759 nvcfg1 = tr32(NVRAM_CFG1);
11760 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
11761 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11763 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11764 tw32(NVRAM_CFG1, nvcfg1);
11767 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
11768 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
11769 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
11770 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
11771 tp->nvram_jedecnum = JEDEC_ATMEL;
11772 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
11773 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11775 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
11776 tp->nvram_jedecnum = JEDEC_ATMEL;
11777 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
11779 case FLASH_VENDOR_ATMEL_EEPROM:
11780 tp->nvram_jedecnum = JEDEC_ATMEL;
11781 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11782 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11784 case FLASH_VENDOR_ST:
11785 tp->nvram_jedecnum = JEDEC_ST;
11786 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
11787 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11789 case FLASH_VENDOR_SAIFUN:
11790 tp->nvram_jedecnum = JEDEC_SAIFUN;
11791 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
11793 case FLASH_VENDOR_SST_SMALL:
11794 case FLASH_VENDOR_SST_LARGE:
11795 tp->nvram_jedecnum = JEDEC_SST;
11796 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
11800 tp->nvram_jedecnum = JEDEC_ATMEL;
11801 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
11802 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11806 static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
11808 switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
11809 case FLASH_5752PAGE_SIZE_256:
11810 tp->nvram_pagesize = 256;
11812 case FLASH_5752PAGE_SIZE_512:
11813 tp->nvram_pagesize = 512;
11815 case FLASH_5752PAGE_SIZE_1K:
11816 tp->nvram_pagesize = 1024;
11818 case FLASH_5752PAGE_SIZE_2K:
11819 tp->nvram_pagesize = 2048;
11821 case FLASH_5752PAGE_SIZE_4K:
11822 tp->nvram_pagesize = 4096;
11824 case FLASH_5752PAGE_SIZE_264:
11825 tp->nvram_pagesize = 264;
11827 case FLASH_5752PAGE_SIZE_528:
11828 tp->nvram_pagesize = 528;
11833 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
11837 nvcfg1 = tr32(NVRAM_CFG1);
11839 /* NVRAM protection for TPM */
11840 if (nvcfg1 & (1 << 27))
11841 tp->tg3_flags3 |= TG3_FLG3_PROTECTED_NVRAM;
11843 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11844 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
11845 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
11846 tp->nvram_jedecnum = JEDEC_ATMEL;
11847 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11849 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11850 tp->nvram_jedecnum = JEDEC_ATMEL;
11851 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11852 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11854 case FLASH_5752VENDOR_ST_M45PE10:
11855 case FLASH_5752VENDOR_ST_M45PE20:
11856 case FLASH_5752VENDOR_ST_M45PE40:
11857 tp->nvram_jedecnum = JEDEC_ST;
11858 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11859 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11863 if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
11864 tg3_nvram_get_pagesize(tp, nvcfg1);
11866 /* For eeprom, set pagesize to maximum eeprom size */
11867 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11869 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11870 tw32(NVRAM_CFG1, nvcfg1);
11874 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
11876 u32 nvcfg1, protect = 0;
11878 nvcfg1 = tr32(NVRAM_CFG1);
11880 /* NVRAM protection for TPM */
11881 if (nvcfg1 & (1 << 27)) {
11882 tp->tg3_flags3 |= TG3_FLG3_PROTECTED_NVRAM;
11886 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
11888 case FLASH_5755VENDOR_ATMEL_FLASH_1:
11889 case FLASH_5755VENDOR_ATMEL_FLASH_2:
11890 case FLASH_5755VENDOR_ATMEL_FLASH_3:
11891 case FLASH_5755VENDOR_ATMEL_FLASH_5:
11892 tp->nvram_jedecnum = JEDEC_ATMEL;
11893 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11894 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11895 tp->nvram_pagesize = 264;
11896 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
11897 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
11898 tp->nvram_size = (protect ? 0x3e200 :
11899 TG3_NVRAM_SIZE_512KB);
11900 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
11901 tp->nvram_size = (protect ? 0x1f200 :
11902 TG3_NVRAM_SIZE_256KB);
11904 tp->nvram_size = (protect ? 0x1f200 :
11905 TG3_NVRAM_SIZE_128KB);
11907 case FLASH_5752VENDOR_ST_M45PE10:
11908 case FLASH_5752VENDOR_ST_M45PE20:
11909 case FLASH_5752VENDOR_ST_M45PE40:
11910 tp->nvram_jedecnum = JEDEC_ST;
11911 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11912 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11913 tp->nvram_pagesize = 256;
11914 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
11915 tp->nvram_size = (protect ?
11916 TG3_NVRAM_SIZE_64KB :
11917 TG3_NVRAM_SIZE_128KB);
11918 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
11919 tp->nvram_size = (protect ?
11920 TG3_NVRAM_SIZE_64KB :
11921 TG3_NVRAM_SIZE_256KB);
11923 tp->nvram_size = (protect ?
11924 TG3_NVRAM_SIZE_128KB :
11925 TG3_NVRAM_SIZE_512KB);
11930 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
11934 nvcfg1 = tr32(NVRAM_CFG1);
11936 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11937 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
11938 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
11939 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
11940 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
11941 tp->nvram_jedecnum = JEDEC_ATMEL;
11942 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11943 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11945 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11946 tw32(NVRAM_CFG1, nvcfg1);
11948 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11949 case FLASH_5755VENDOR_ATMEL_FLASH_1:
11950 case FLASH_5755VENDOR_ATMEL_FLASH_2:
11951 case FLASH_5755VENDOR_ATMEL_FLASH_3:
11952 tp->nvram_jedecnum = JEDEC_ATMEL;
11953 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11954 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11955 tp->nvram_pagesize = 264;
11957 case FLASH_5752VENDOR_ST_M45PE10:
11958 case FLASH_5752VENDOR_ST_M45PE20:
11959 case FLASH_5752VENDOR_ST_M45PE40:
11960 tp->nvram_jedecnum = JEDEC_ST;
11961 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11962 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11963 tp->nvram_pagesize = 256;
11968 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
11970 u32 nvcfg1, protect = 0;
11972 nvcfg1 = tr32(NVRAM_CFG1);
11974 /* NVRAM protection for TPM */
11975 if (nvcfg1 & (1 << 27)) {
11976 tp->tg3_flags3 |= TG3_FLG3_PROTECTED_NVRAM;
11980 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
11982 case FLASH_5761VENDOR_ATMEL_ADB021D:
11983 case FLASH_5761VENDOR_ATMEL_ADB041D:
11984 case FLASH_5761VENDOR_ATMEL_ADB081D:
11985 case FLASH_5761VENDOR_ATMEL_ADB161D:
11986 case FLASH_5761VENDOR_ATMEL_MDB021D:
11987 case FLASH_5761VENDOR_ATMEL_MDB041D:
11988 case FLASH_5761VENDOR_ATMEL_MDB081D:
11989 case FLASH_5761VENDOR_ATMEL_MDB161D:
11990 tp->nvram_jedecnum = JEDEC_ATMEL;
11991 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11992 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11993 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
11994 tp->nvram_pagesize = 256;
11996 case FLASH_5761VENDOR_ST_A_M45PE20:
11997 case FLASH_5761VENDOR_ST_A_M45PE40:
11998 case FLASH_5761VENDOR_ST_A_M45PE80:
11999 case FLASH_5761VENDOR_ST_A_M45PE16:
12000 case FLASH_5761VENDOR_ST_M_M45PE20:
12001 case FLASH_5761VENDOR_ST_M_M45PE40:
12002 case FLASH_5761VENDOR_ST_M_M45PE80:
12003 case FLASH_5761VENDOR_ST_M_M45PE16:
12004 tp->nvram_jedecnum = JEDEC_ST;
12005 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
12006 tp->tg3_flags2 |= TG3_FLG2_FLASH;
12007 tp->nvram_pagesize = 256;
12012 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
12015 case FLASH_5761VENDOR_ATMEL_ADB161D:
12016 case FLASH_5761VENDOR_ATMEL_MDB161D:
12017 case FLASH_5761VENDOR_ST_A_M45PE16:
12018 case FLASH_5761VENDOR_ST_M_M45PE16:
12019 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
12021 case FLASH_5761VENDOR_ATMEL_ADB081D:
12022 case FLASH_5761VENDOR_ATMEL_MDB081D:
12023 case FLASH_5761VENDOR_ST_A_M45PE80:
12024 case FLASH_5761VENDOR_ST_M_M45PE80:
12025 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12027 case FLASH_5761VENDOR_ATMEL_ADB041D:
12028 case FLASH_5761VENDOR_ATMEL_MDB041D:
12029 case FLASH_5761VENDOR_ST_A_M45PE40:
12030 case FLASH_5761VENDOR_ST_M_M45PE40:
12031 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12033 case FLASH_5761VENDOR_ATMEL_ADB021D:
12034 case FLASH_5761VENDOR_ATMEL_MDB021D:
12035 case FLASH_5761VENDOR_ST_A_M45PE20:
12036 case FLASH_5761VENDOR_ST_M_M45PE20:
12037 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12043 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
12045 tp->nvram_jedecnum = JEDEC_ATMEL;
12046 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
12047 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12050 static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
12054 nvcfg1 = tr32(NVRAM_CFG1);
12056 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12057 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
12058 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
12059 tp->nvram_jedecnum = JEDEC_ATMEL;
12060 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
12061 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12063 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12064 tw32(NVRAM_CFG1, nvcfg1);
12066 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12067 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
12068 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
12069 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
12070 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
12071 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12072 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12073 tp->nvram_jedecnum = JEDEC_ATMEL;
12074 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
12075 tp->tg3_flags2 |= TG3_FLG2_FLASH;
12077 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12078 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12079 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
12080 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
12081 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12083 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
12084 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
12085 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12087 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12088 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12089 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12093 case FLASH_5752VENDOR_ST_M45PE10:
12094 case FLASH_5752VENDOR_ST_M45PE20:
12095 case FLASH_5752VENDOR_ST_M45PE40:
12096 tp->nvram_jedecnum = JEDEC_ST;
12097 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
12098 tp->tg3_flags2 |= TG3_FLG2_FLASH;
12100 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12101 case FLASH_5752VENDOR_ST_M45PE10:
12102 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12104 case FLASH_5752VENDOR_ST_M45PE20:
12105 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12107 case FLASH_5752VENDOR_ST_M45PE40:
12108 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12113 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM;
12117 tg3_nvram_get_pagesize(tp, nvcfg1);
12118 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12119 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
12123 static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
12127 nvcfg1 = tr32(NVRAM_CFG1);
12129 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12130 case FLASH_5717VENDOR_ATMEL_EEPROM:
12131 case FLASH_5717VENDOR_MICRO_EEPROM:
12132 tp->nvram_jedecnum = JEDEC_ATMEL;
12133 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
12134 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12136 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12137 tw32(NVRAM_CFG1, nvcfg1);
12139 case FLASH_5717VENDOR_ATMEL_MDB011D:
12140 case FLASH_5717VENDOR_ATMEL_ADB011B:
12141 case FLASH_5717VENDOR_ATMEL_ADB011D:
12142 case FLASH_5717VENDOR_ATMEL_MDB021D:
12143 case FLASH_5717VENDOR_ATMEL_ADB021B:
12144 case FLASH_5717VENDOR_ATMEL_ADB021D:
12145 case FLASH_5717VENDOR_ATMEL_45USPT:
12146 tp->nvram_jedecnum = JEDEC_ATMEL;
12147 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
12148 tp->tg3_flags2 |= TG3_FLG2_FLASH;
12150 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12151 case FLASH_5717VENDOR_ATMEL_MDB021D:
12152 /* Detect size with tg3_nvram_get_size() */
12154 case FLASH_5717VENDOR_ATMEL_ADB021B:
12155 case FLASH_5717VENDOR_ATMEL_ADB021D:
12156 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12159 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12163 case FLASH_5717VENDOR_ST_M_M25PE10:
12164 case FLASH_5717VENDOR_ST_A_M25PE10:
12165 case FLASH_5717VENDOR_ST_M_M45PE10:
12166 case FLASH_5717VENDOR_ST_A_M45PE10:
12167 case FLASH_5717VENDOR_ST_M_M25PE20:
12168 case FLASH_5717VENDOR_ST_A_M25PE20:
12169 case FLASH_5717VENDOR_ST_M_M45PE20:
12170 case FLASH_5717VENDOR_ST_A_M45PE20:
12171 case FLASH_5717VENDOR_ST_25USPT:
12172 case FLASH_5717VENDOR_ST_45USPT:
12173 tp->nvram_jedecnum = JEDEC_ST;
12174 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
12175 tp->tg3_flags2 |= TG3_FLG2_FLASH;
12177 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12178 case FLASH_5717VENDOR_ST_M_M25PE20:
12179 case FLASH_5717VENDOR_ST_M_M45PE20:
12180 /* Detect size with tg3_nvram_get_size() */
12182 case FLASH_5717VENDOR_ST_A_M25PE20:
12183 case FLASH_5717VENDOR_ST_A_M45PE20:
12184 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12187 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12192 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM;
12196 tg3_nvram_get_pagesize(tp, nvcfg1);
12197 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12198 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
12201 static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
12203 u32 nvcfg1, nvmpinstrp;
12205 nvcfg1 = tr32(NVRAM_CFG1);
12206 nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
12208 switch (nvmpinstrp) {
12209 case FLASH_5720_EEPROM_HD:
12210 case FLASH_5720_EEPROM_LD:
12211 tp->nvram_jedecnum = JEDEC_ATMEL;
12212 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
12214 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12215 tw32(NVRAM_CFG1, nvcfg1);
12216 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
12217 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12219 tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
12221 case FLASH_5720VENDOR_M_ATMEL_DB011D:
12222 case FLASH_5720VENDOR_A_ATMEL_DB011B:
12223 case FLASH_5720VENDOR_A_ATMEL_DB011D:
12224 case FLASH_5720VENDOR_M_ATMEL_DB021D:
12225 case FLASH_5720VENDOR_A_ATMEL_DB021B:
12226 case FLASH_5720VENDOR_A_ATMEL_DB021D:
12227 case FLASH_5720VENDOR_M_ATMEL_DB041D:
12228 case FLASH_5720VENDOR_A_ATMEL_DB041B:
12229 case FLASH_5720VENDOR_A_ATMEL_DB041D:
12230 case FLASH_5720VENDOR_M_ATMEL_DB081D:
12231 case FLASH_5720VENDOR_A_ATMEL_DB081D:
12232 case FLASH_5720VENDOR_ATMEL_45USPT:
12233 tp->nvram_jedecnum = JEDEC_ATMEL;
12234 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
12235 tp->tg3_flags2 |= TG3_FLG2_FLASH;
12237 switch (nvmpinstrp) {
12238 case FLASH_5720VENDOR_M_ATMEL_DB021D:
12239 case FLASH_5720VENDOR_A_ATMEL_DB021B:
12240 case FLASH_5720VENDOR_A_ATMEL_DB021D:
12241 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12243 case FLASH_5720VENDOR_M_ATMEL_DB041D:
12244 case FLASH_5720VENDOR_A_ATMEL_DB041B:
12245 case FLASH_5720VENDOR_A_ATMEL_DB041D:
12246 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12248 case FLASH_5720VENDOR_M_ATMEL_DB081D:
12249 case FLASH_5720VENDOR_A_ATMEL_DB081D:
12250 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12253 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12257 case FLASH_5720VENDOR_M_ST_M25PE10:
12258 case FLASH_5720VENDOR_M_ST_M45PE10:
12259 case FLASH_5720VENDOR_A_ST_M25PE10:
12260 case FLASH_5720VENDOR_A_ST_M45PE10:
12261 case FLASH_5720VENDOR_M_ST_M25PE20:
12262 case FLASH_5720VENDOR_M_ST_M45PE20:
12263 case FLASH_5720VENDOR_A_ST_M25PE20:
12264 case FLASH_5720VENDOR_A_ST_M45PE20:
12265 case FLASH_5720VENDOR_M_ST_M25PE40:
12266 case FLASH_5720VENDOR_M_ST_M45PE40:
12267 case FLASH_5720VENDOR_A_ST_M25PE40:
12268 case FLASH_5720VENDOR_A_ST_M45PE40:
12269 case FLASH_5720VENDOR_M_ST_M25PE80:
12270 case FLASH_5720VENDOR_M_ST_M45PE80:
12271 case FLASH_5720VENDOR_A_ST_M25PE80:
12272 case FLASH_5720VENDOR_A_ST_M45PE80:
12273 case FLASH_5720VENDOR_ST_25USPT:
12274 case FLASH_5720VENDOR_ST_45USPT:
12275 tp->nvram_jedecnum = JEDEC_ST;
12276 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
12277 tp->tg3_flags2 |= TG3_FLG2_FLASH;
12279 switch (nvmpinstrp) {
12280 case FLASH_5720VENDOR_M_ST_M25PE20:
12281 case FLASH_5720VENDOR_M_ST_M45PE20:
12282 case FLASH_5720VENDOR_A_ST_M25PE20:
12283 case FLASH_5720VENDOR_A_ST_M45PE20:
12284 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12286 case FLASH_5720VENDOR_M_ST_M25PE40:
12287 case FLASH_5720VENDOR_M_ST_M45PE40:
12288 case FLASH_5720VENDOR_A_ST_M25PE40:
12289 case FLASH_5720VENDOR_A_ST_M45PE40:
12290 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12292 case FLASH_5720VENDOR_M_ST_M25PE80:
12293 case FLASH_5720VENDOR_M_ST_M45PE80:
12294 case FLASH_5720VENDOR_A_ST_M25PE80:
12295 case FLASH_5720VENDOR_A_ST_M45PE80:
12296 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12299 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12304 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM;
12308 tg3_nvram_get_pagesize(tp, nvcfg1);
12309 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12310 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
12313 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
12314 static void __devinit tg3_nvram_init(struct tg3 *tp)
12316 tw32_f(GRC_EEPROM_ADDR,
12317 (EEPROM_ADDR_FSM_RESET |
12318 (EEPROM_DEFAULT_CLOCK_PERIOD <<
12319 EEPROM_ADDR_CLKPERD_SHIFT)));
12323 /* Enable seeprom accesses. */
12324 tw32_f(GRC_LOCAL_CTRL,
12325 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
12328 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12329 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
12330 tp->tg3_flags |= TG3_FLAG_NVRAM;
12332 if (tg3_nvram_lock(tp)) {
12333 netdev_warn(tp->dev,
12334 "Cannot get nvram lock, %s failed\n",
12338 tg3_enable_nvram_access(tp);
12340 tp->nvram_size = 0;
12342 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
12343 tg3_get_5752_nvram_info(tp);
12344 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12345 tg3_get_5755_nvram_info(tp);
12346 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12347 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12348 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12349 tg3_get_5787_nvram_info(tp);
12350 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
12351 tg3_get_5761_nvram_info(tp);
12352 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12353 tg3_get_5906_nvram_info(tp);
12354 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
12355 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
12356 tg3_get_57780_nvram_info(tp);
12357 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
12358 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
12359 tg3_get_5717_nvram_info(tp);
12360 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
12361 tg3_get_5720_nvram_info(tp);
12363 tg3_get_nvram_info(tp);
12365 if (tp->nvram_size == 0)
12366 tg3_get_nvram_size(tp);
12368 tg3_disable_nvram_access(tp);
12369 tg3_nvram_unlock(tp);
12372 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
12374 tg3_get_eeprom_size(tp);
12378 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
12379 u32 offset, u32 len, u8 *buf)
12384 for (i = 0; i < len; i += 4) {
12390 memcpy(&data, buf + i, 4);
12393 * The SEEPROM interface expects the data to always be opposite
12394 * the native endian format. We accomplish this by reversing
12395 * all the operations that would have been performed on the
12396 * data from a call to tg3_nvram_read_be32().
12398 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
12400 val = tr32(GRC_EEPROM_ADDR);
12401 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
12403 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
12405 tw32(GRC_EEPROM_ADDR, val |
12406 (0 << EEPROM_ADDR_DEVID_SHIFT) |
12407 (addr & EEPROM_ADDR_ADDR_MASK) |
12408 EEPROM_ADDR_START |
12409 EEPROM_ADDR_WRITE);
12411 for (j = 0; j < 1000; j++) {
12412 val = tr32(GRC_EEPROM_ADDR);
12414 if (val & EEPROM_ADDR_COMPLETE)
12418 if (!(val & EEPROM_ADDR_COMPLETE)) {
12427 /* offset and length are dword aligned */
12428 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
12432 u32 pagesize = tp->nvram_pagesize;
12433 u32 pagemask = pagesize - 1;
12437 tmp = kmalloc(pagesize, GFP_KERNEL);
12443 u32 phy_addr, page_off, size;
12445 phy_addr = offset & ~pagemask;
12447 for (j = 0; j < pagesize; j += 4) {
12448 ret = tg3_nvram_read_be32(tp, phy_addr + j,
12449 (__be32 *) (tmp + j));
12456 page_off = offset & pagemask;
12463 memcpy(tmp + page_off, buf, size);
12465 offset = offset + (pagesize - page_off);
12467 tg3_enable_nvram_access(tp);
12470 * Before we can erase the flash page, we need
12471 * to issue a special "write enable" command.
12473 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12475 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12478 /* Erase the target page */
12479 tw32(NVRAM_ADDR, phy_addr);
12481 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
12482 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
12484 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12487 /* Issue another write enable to start the write. */
12488 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12490 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12493 for (j = 0; j < pagesize; j += 4) {
12496 data = *((__be32 *) (tmp + j));
12498 tw32(NVRAM_WRDATA, be32_to_cpu(data));
12500 tw32(NVRAM_ADDR, phy_addr + j);
12502 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
12506 nvram_cmd |= NVRAM_CMD_FIRST;
12507 else if (j == (pagesize - 4))
12508 nvram_cmd |= NVRAM_CMD_LAST;
12510 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
12517 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12518 tg3_nvram_exec_cmd(tp, nvram_cmd);
12525 /* offset and length are dword aligned */
12526 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
12531 for (i = 0; i < len; i += 4, offset += 4) {
12532 u32 page_off, phy_addr, nvram_cmd;
12535 memcpy(&data, buf + i, 4);
12536 tw32(NVRAM_WRDATA, be32_to_cpu(data));
12538 page_off = offset % tp->nvram_pagesize;
12540 phy_addr = tg3_nvram_phys_addr(tp, offset);
12542 tw32(NVRAM_ADDR, phy_addr);
12544 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
12546 if (page_off == 0 || i == 0)
12547 nvram_cmd |= NVRAM_CMD_FIRST;
12548 if (page_off == (tp->nvram_pagesize - 4))
12549 nvram_cmd |= NVRAM_CMD_LAST;
12551 if (i == (len - 4))
12552 nvram_cmd |= NVRAM_CMD_LAST;
12554 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
12555 !(tp->tg3_flags3 & TG3_FLG3_5755_PLUS) &&
12556 (tp->nvram_jedecnum == JEDEC_ST) &&
12557 (nvram_cmd & NVRAM_CMD_FIRST)) {
12559 if ((ret = tg3_nvram_exec_cmd(tp,
12560 NVRAM_CMD_WREN | NVRAM_CMD_GO |
12565 if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
12566 /* We always do complete word writes to eeprom. */
12567 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
12570 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
12576 /* offset and length are dword aligned */
12577 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
12581 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
12582 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
12583 ~GRC_LCLCTRL_GPIO_OUTPUT1);
12587 if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
12588 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
12592 ret = tg3_nvram_lock(tp);
12596 tg3_enable_nvram_access(tp);
12597 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
12598 !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM))
12599 tw32(NVRAM_WRITE1, 0x406);
12601 grc_mode = tr32(GRC_MODE);
12602 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
12604 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
12605 !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
12607 ret = tg3_nvram_write_block_buffered(tp, offset, len,
12610 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
12614 grc_mode = tr32(GRC_MODE);
12615 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
12617 tg3_disable_nvram_access(tp);
12618 tg3_nvram_unlock(tp);
12621 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
12622 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
12629 struct subsys_tbl_ent {
12630 u16 subsys_vendor, subsys_devid;
/* Hard-coded subsystem-ID -> PHY-ID table, used by tg3_lookup_by_subsys()
 * as a fallback when no EEPROM signature is found. An entry whose last
 * field is 0 presumably marks a board with no copper-PHY override
 * (e.g. the SX/serdes variants) — TODO confirm against full source.
 */
12634 static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
12635 /* Broadcom boards. */
12636 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12637 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
12638 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12639 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
12640 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12641 TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
12642 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12643 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
12644 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12645 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
12646 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12647 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
12648 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12649 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
12650 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12651 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
12652 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12653 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
12654 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12655 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
12656 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12657 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
/* 3com boards. */
12660 { TG3PCI_SUBVENDOR_ID_3COM,
12661 TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
12662 { TG3PCI_SUBVENDOR_ID_3COM,
12663 TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
12664 { TG3PCI_SUBVENDOR_ID_3COM,
12665 TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
12666 { TG3PCI_SUBVENDOR_ID_3COM,
12667 TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
12668 { TG3PCI_SUBVENDOR_ID_3COM,
12669 TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
/* DELL boards. */
12672 { TG3PCI_SUBVENDOR_ID_DELL,
12673 TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
12674 { TG3PCI_SUBVENDOR_ID_DELL,
12675 TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
12676 { TG3PCI_SUBVENDOR_ID_DELL,
12677 TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
12678 { TG3PCI_SUBVENDOR_ID_DELL,
12679 TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
12681 /* Compaq boards. */
12682 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12683 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
12684 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12685 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
12686 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12687 TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
12688 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12689 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
12690 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12691 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
/* IBM boards. */
12694 { TG3PCI_SUBVENDOR_ID_IBM,
12695 TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
/* Linear search of subsys_id_to_phy_id[] for an entry matching this
 * device's PCI subsystem vendor/device IDs. Returns a pointer into the
 * table on a match; the not-found return path is not visible in this
 * extract (presumably NULL — TODO confirm).
 */
12698 static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
12702 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
12703 if ((subsys_id_to_phy_id[i].subsys_vendor ==
12704 tp->pdev->subsystem_vendor) &&
12705 (subsys_id_to_phy_id[i].subsys_devid ==
12706 tp->pdev->subsystem_device))
12707 return &subsys_id_to_phy_id[i];
/* Probe-time parser of the NVRAM hardware-configuration block.
 * Forces the device into D0, enables the memory arbiter so SRAM reads
 * work, then reads NIC_SRAM_DATA_* words and translates them into
 * tp->phy_id, tp->led_ctrl, and the EEPROM-WP / WOL / ASF / APE / serdes
 * flag bits. Finally mirrors the WOL capability into the device-model
 * wakeup state.
 * NOTE(review): many structural lines (braces, else branches) are missing
 * from this extract; control-flow comments below are inferred and hedged.
 */
12712 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
12717 /* On some early chips the SRAM cannot be accessed in D3hot state,
12718 * so need make sure we're in D0.
12720 pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
12721 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
12722 pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
12725 /* Make sure register accesses (indirect or otherwise)
12726 * will function correctly.
12728 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12729 tp->misc_host_ctrl);
12731 /* The memory arbiter has to be enabled in order for SRAM accesses
12732 * to succeed. Normally on powerup the tg3 chip firmware will make
12733 * sure it is enabled, but other entities such as system netboot
12734 * code might disable it.
12736 val = tr32(MEMARB_MODE);
12737 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
12739 tp->phy_id = TG3_PHY_ID_INVALID;
12740 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12742 /* Assume an onboard device and WOL capable by default. */
12743 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT | TG3_FLAG_WOL_CAP;
/* 5906: LOM detection comes from PCIE_TRANSACTION_CFG and the
 * ASPM/WOL bits live in the VCPU shadow register instead of SRAM. */
12745 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12746 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
12747 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
12748 tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
12750 val = tr32(VCPU_CFGSHDW);
12751 if (val & VCPU_CFGSHDW_ASPM_DBNC)
12752 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
12753 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
12754 (val & VCPU_CFGSHDW_WOL_MAGPKT))
12755 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
/* Only trust the SRAM config block if its magic signature checks out. */
12759 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
12760 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
12761 u32 nic_cfg, led_cfg;
12762 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
12763 int eeprom_phy_serdes = 0;
12765 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
12766 tp->nic_sram_data_cfg = nic_cfg;
12768 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
12769 ver >>= NIC_SRAM_DATA_VER_SHIFT;
12770 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
12771 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
12772 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
12773 (ver > 0) && (ver < 0x100))
12774 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
12776 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12777 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
12779 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
12780 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
12781 eeprom_phy_serdes = 1;
/* Translate the SRAM-encoded PHY ID into the driver's canonical
 * TG3_PHY_ID_* bit layout. */
12783 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
12784 if (nic_phy_id != 0) {
12785 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
12786 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
12788 eeprom_phy_id = (id1 >> 16) << 10;
12789 eeprom_phy_id |= (id2 & 0xfc00) << 16;
12790 eeprom_phy_id |= (id2 & 0x03ff) << 0;
12794 tp->phy_id = eeprom_phy_id;
12795 if (eeprom_phy_serdes) {
12796 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
12797 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
12799 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
/* LED mode: 5750+ parts use the extended field in cfg2, older parts
 * use the basic field in nic_cfg. */
12802 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
12803 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
12804 SHASTA_EXT_LED_MODE_MASK);
12806 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
12810 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
12811 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12814 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
12815 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
12818 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
12819 tp->led_ctrl = LED_CTRL_MODE_MAC;
12821 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
12822 * read on some older 5700/5701 bootcode.
12824 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
12826 GET_ASIC_REV(tp->pci_chip_rev_id) ==
12828 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12832 case SHASTA_EXT_LED_SHARED:
12833 tp->led_ctrl = LED_CTRL_MODE_SHARED;
12834 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
12835 tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
12836 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
12837 LED_CTRL_MODE_PHY_2);
12840 case SHASTA_EXT_LED_MAC:
12841 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
12844 case SHASTA_EXT_LED_COMBO:
12845 tp->led_ctrl = LED_CTRL_MODE_COMBO;
12846 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
12847 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
12848 LED_CTRL_MODE_PHY_2);
/* Board-specific LED overrides (Dell 5700/5701 boards; 5784 AX rev). */
12853 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12854 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
12855 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
12856 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
12858 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
12859 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
/* EEPROM write-protect bit; two specific Arima boards set it wrongly. */
12861 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
12862 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
12863 if ((tp->pdev->subsystem_vendor ==
12864 PCI_VENDOR_ID_ARIMA) &&
12865 (tp->pdev->subsystem_device == 0x205a ||
12866 tp->pdev->subsystem_device == 0x2063))
12867 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
12869 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
12870 tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
12873 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
12874 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
12875 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
12876 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
12879 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
12880 (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
12881 tp->tg3_flags3 |= TG3_FLG3_ENABLE_APE;
12883 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
12884 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
12885 tp->tg3_flags &= ~TG3_FLAG_WOL_CAP;
12887 if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
12888 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE))
12889 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
12891 if (cfg2 & (1 << 17))
12892 tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
12894 /* serdes signal pre-emphasis in register 0x590 set by */
12895 /* bootcode if bit 18 is set */
12896 if (cfg2 & (1 << 18))
12897 tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
12899 if (((tp->tg3_flags3 & TG3_FLG3_57765_PLUS) ||
12900 ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
12901 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX))) &&
12902 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
12903 tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
/* PCIe (non-5785, pre-57765) parts carry the ASPM debounce bit in
 * a third SRAM config word. */
12905 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
12906 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
12907 !(tp->tg3_flags3 & TG3_FLG3_57765_PLUS)) {
12910 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
12911 if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
12912 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
12915 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
12916 tp->tg3_flags3 |= TG3_FLG3_RGMII_INBAND_DISABLE;
12917 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
12918 tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_RX_EN;
12919 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
12920 tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_TX_EN;
/* Propagate the WOL decision to the driver-model wakeup machinery. */
12923 if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
12924 device_set_wakeup_enable(&tp->pdev->dev,
12925 tp->tg3_flags & TG3_FLAG_WOL_ENABLE);
12927 device_set_wakeup_capable(&tp->pdev->dev, false);
/* Kick one OTP controller command (cmd | START, then cmd) and poll
 * OTP_STATUS for completion. Returns 0 on CMD_DONE, -EBUSY on timeout.
 * NOTE(review): the per-iteration delay inside the poll loop is not
 * visible in this extract.
 */
12930 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
12935 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
12936 tw32(OTP_CTRL, cmd);
12938 /* Wait for up to 1 ms for command to execute. */
12939 for (i = 0; i < 100; i++) {
12940 val = tr32(OTP_STATUS);
12941 if (val & OTP_STATUS_CMD_DONE)
12946 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
12949 /* Read the gphy configuration from the OTP region of the chip. The gphy
12950 * configuration is a 32-bit value that straddles the alignment boundary.
12951 * We do two 32-bit reads and then shift and merge the results.
/* Returns the merged 32-bit gphy config; the early-return value taken
 * when an OTP command fails is not visible in this extract (presumably
 * 0 — TODO confirm). */
12953 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
12955 u32 bhalf_otp, thalf_otp;
12957 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
12959 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
12962 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
12964 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
12967 thalf_otp = tr32(OTP_READ_DATA);
12969 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
12971 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
12974 bhalf_otp = tr32(OTP_READ_DATA);
/* Low half of the first word + high half of the second. */
12976 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
/* Initialize tp->link_config to a full-autoneg default: advertise
 * gigabit unless the PHY is 10/100-only, advertise 10/100 copper rates
 * unless the PHY is a serdes (fibre) device, and mark all current/
 * original speed and duplex fields invalid until link comes up.
 */
12979 static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
12981 u32 adv = ADVERTISED_Autoneg |
12984 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12985 adv |= ADVERTISED_1000baseT_Half |
12986 ADVERTISED_1000baseT_Full;
12988 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
12989 adv |= ADVERTISED_100baseT_Half |
12990 ADVERTISED_100baseT_Full |
12991 ADVERTISED_10baseT_Half |
12992 ADVERTISED_10baseT_Full |
12995 adv |= ADVERTISED_FIBRE;
12997 tp->link_config.advertising = adv;
12998 tp->link_config.speed = SPEED_INVALID;
12999 tp->link_config.duplex = DUPLEX_INVALID;
13000 tp->link_config.autoneg = AUTONEG_ENABLE;
13001 tp->link_config.active_speed = SPEED_INVALID;
13002 tp->link_config.active_duplex = DUPLEX_INVALID;
13003 tp->link_config.orig_speed = SPEED_INVALID;
13004 tp->link_config.orig_duplex = DUPLEX_INVALID;
13005 tp->link_config.orig_autoneg = AUTONEG_INVALID;
/* Probe and identify the PHY attached to this MAC.
 * Order of preference: phylib (if enabled) -> PHY ID read over MII ->
 * tp->phy_id already set from the EEPROM -> hard-coded subsystem table.
 * Also seeds default flow control, EEE capability, and (for copper PHYs
 * without ASF/APE firmware) restarts autonegotiation with a full
 * advertisement. Returns 0 or a negative errno from the PHY helpers.
 * NOTE(review): several braces/else branches are missing from this
 * extract; inline comments on control flow are best-effort.
 */
13008 static int __devinit tg3_phy_probe(struct tg3 *tp)
13010 u32 hw_phy_id_1, hw_phy_id_2;
13011 u32 hw_phy_id, hw_phy_id_masked;
13014 /* flow control autonegotiation is default behavior */
13015 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
13016 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
13018 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
13019 return tg3_phy_init(tp);
13021 /* Reading the PHY ID register can conflict with ASF
13022 * firmware access to the PHY hardware.
13025 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
13026 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
13027 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
13029 /* Now read the physical PHY_ID from the chip and verify
13030 * that it is sane. If it doesn't look good, we fall back
13031 * to either the hard-coded table based PHY_ID and failing
13032 * that the value found in the eeprom area.
13034 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
13035 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
/* Repack MII_PHYSID1/2 into the driver's TG3_PHY_ID_* layout. */
13037 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
13038 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
13039 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
13041 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
13044 if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
13045 tp->phy_id = hw_phy_id;
13046 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
13047 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13049 tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
13051 if (tp->phy_id != TG3_PHY_ID_INVALID) {
13052 /* Do nothing, phy ID already set up in
13053 * tg3_get_eeprom_hw_cfg().
13056 struct subsys_tbl_ent *p;
13058 /* No eeprom signature? Try the hardcoded
13059 * subsys device table.
13061 p = tg3_lookup_by_subsys(tp);
13065 tp->phy_id = p->phy_id;
13067 tp->phy_id == TG3_PHY_ID_BCM8002)
13068 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
/* EEE capability: copper-only 5718 (post-A0) and 57765 (post-A0). */
13072 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13073 ((tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
13074 tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
13075 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
13076 tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
13077 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
13079 tg3_phy_init_link_config(tp);
/* For copper PHYs not owned by management firmware: reset and make
 * sure the full 10/100/1000 advertisement is programmed. */
13081 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13082 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) &&
13083 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
13084 u32 bmsr, adv_reg, tg3_ctrl, mask;
/* BMSR latches link-down; read twice and skip the reset if link
 * is already up. */
13086 tg3_readphy(tp, MII_BMSR, &bmsr);
13087 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
13088 (bmsr & BMSR_LSTATUS))
13089 goto skip_phy_reset;
13091 err = tg3_phy_reset(tp);
13095 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
13096 ADVERTISE_100HALF | ADVERTISE_100FULL |
13097 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
13099 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
13100 tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
13101 MII_TG3_CTRL_ADV_1000_FULL);
13102 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
13103 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
13104 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
13105 MII_TG3_CTRL_ENABLE_AS_MASTER);
13108 mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
13109 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
13110 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
13111 if (!tg3_copper_is_advertising_all(tp, mask)) {
13112 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
13114 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
13115 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
13117 tg3_writephy(tp, MII_BMCR,
13118 BMCR_ANENABLE | BMCR_ANRESTART);
13120 tg3_phy_set_wirespeed(tp);
13122 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
13123 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
13124 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
/* BCM5401 needs its DSP reprogrammed after probe. */
13128 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
13129 err = tg3_init_5401phy_dsp(tp);
13133 err = tg3_init_5401phy_dsp(tp);
/* Extract the board part number (and, for Dell "1028" boards, a vendor
 * firmware string prepended to tp->fw_ver) from the PCI VPD read-only
 * section. Falls back to hard-coded "BCMxxxxx" names keyed off the ASIC
 * rev / PCI device ID when no usable VPD part number is found, and
 * finally to "none".
 * NOTE(review): the vpd_data free and several branch/label lines are
 * missing from this extract.
 */
13139 static void __devinit tg3_read_vpd(struct tg3 *tp)
13142 unsigned int block_end, rosize, len;
13145 vpd_data = (u8 *)tg3_vpd_readblock(tp);
/* Locate the VPD-R (read-only) resource and bound it. */
13149 i = pci_vpd_find_tag(vpd_data, 0, TG3_NVM_VPD_LEN,
13150 PCI_VPD_LRDT_RO_DATA);
13152 goto out_not_found;
13154 rosize = pci_vpd_lrdt_size(&vpd_data[i]);
13155 block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
13156 i += PCI_VPD_LRDT_TAG_SIZE;
13158 if (block_end > TG3_NVM_VPD_LEN)
13159 goto out_not_found;
/* Dell boards (MFR_ID "1028") stash a firmware version in V0. */
13161 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13162 PCI_VPD_RO_KEYWORD_MFR_ID);
13164 len = pci_vpd_info_field_size(&vpd_data[j]);
13166 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13167 if (j + len > block_end || len != 4 ||
13168 memcmp(&vpd_data[j], "1028", 4))
13171 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13172 PCI_VPD_RO_KEYWORD_VENDOR0);
13176 len = pci_vpd_info_field_size(&vpd_data[j]);
13178 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13179 if (j + len > block_end)
13182 memcpy(tp->fw_ver, &vpd_data[j], len);
13183 strncat(tp->fw_ver, " bc ", TG3_NVM_VPD_LEN - len - 1);
/* Part number proper, bounded by TG3_BPN_SIZE. */
13187 i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13188 PCI_VPD_RO_KEYWORD_PARTNO);
13190 goto out_not_found;
13192 len = pci_vpd_info_field_size(&vpd_data[i]);
13194 i += PCI_VPD_INFO_FLD_HDR_SIZE;
13195 if (len > TG3_BPN_SIZE ||
13196 (len + i) > TG3_NVM_VPD_LEN)
13197 goto out_not_found;
13199 memcpy(tp->board_part_number, &vpd_data[i], len);
13203 if (tp->board_part_number[0])
/* No VPD part number — derive one from the PCI device ID. */
13207 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
13208 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
13209 strcpy(tp->board_part_number, "BCM5717");
13210 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
13211 strcpy(tp->board_part_number, "BCM5718");
13214 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
13215 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
13216 strcpy(tp->board_part_number, "BCM57780");
13217 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
13218 strcpy(tp->board_part_number, "BCM57760");
13219 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
13220 strcpy(tp->board_part_number, "BCM57790");
13221 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
13222 strcpy(tp->board_part_number, "BCM57788");
13225 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
13226 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
13227 strcpy(tp->board_part_number, "BCM57761");
13228 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
13229 strcpy(tp->board_part_number, "BCM57765");
13230 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
13231 strcpy(tp->board_part_number, "BCM57781");
13232 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
13233 strcpy(tp->board_part_number, "BCM57785");
13234 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
13235 strcpy(tp->board_part_number, "BCM57791");
13236 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13237 strcpy(tp->board_part_number, "BCM57795");
13240 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13241 strcpy(tp->board_part_number, "BCM95906");
13244 strcpy(tp->board_part_number, "none");
/* Sanity-check a firmware image header in NVRAM: the first word must
 * carry the 0x0c000000 signature in its top bits and the second word
 * must be readable. The remaining check(s) and return statements are
 * not visible in this extract.
 */
13248 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
13252 if (tg3_nvram_read(tp, offset, &val) ||
13253 (val & 0xfc000000) != 0x0c000000 ||
13254 tg3_nvram_read(tp, offset + 4, &val) ||
/* Append the bootcode version to tp->fw_ver. Newer images (header
 * signature 0x0c000000) carry a 16-byte version string copied verbatim
 * from NVRAM; otherwise the major/minor pair is read from the directory
 * pointer and formatted as "vM.mm".
 */
13261 static void __devinit tg3_read_bc_ver(struct tg3 *tp)
13263 u32 val, offset, start, ver_offset;
13265 bool newver = false;
13267 if (tg3_nvram_read(tp, 0xc, &offset) ||
13268 tg3_nvram_read(tp, 0x4, &start))
13271 offset = tg3_nvram_logical_addr(tp, offset);
13273 if (tg3_nvram_read(tp, offset, &val))
13276 if ((val & 0xfc000000) == 0x0c000000) {
13277 if (tg3_nvram_read(tp, offset + 4, &val))
/* New-style image: copy the 16-byte version string. */
13284 dst_off = strlen(tp->fw_ver);
13287 if (TG3_VER_SIZE - dst_off < 16 ||
13288 tg3_nvram_read(tp, offset + 8, &ver_offset))
13291 offset = offset + ver_offset - start;
13292 for (i = 0; i < 16; i += 4) {
13294 if (tg3_nvram_read_be32(tp, offset + i, &v))
13297 memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
/* Old-style image: decode packed major/minor fields. */
13302 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
13305 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
13306 TG3_NVM_BCVER_MAJSFT;
13307 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
13308 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
13309 "v%d.%02d", major, minor);
/* Format the hardware self-boot version from the HWSB CFG1 NVRAM word
 * into tp->fw_ver as "sb vM.mm". Returns silently if the NVRAM read
 * fails.
 */
13313 static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
13315 u32 val, major, minor;
13317 /* Use native endian representation */
13318 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
13321 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
13322 TG3_NVM_HWSB_CFG1_MAJSFT;
13323 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
13324 TG3_NVM_HWSB_CFG1_MINSFT;
13326 snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
/* Append the self-boot (format 1) version to tp->fw_ver as
 * "sb vM.mm" plus an optional build letter ('a' + build - 1).
 * @val: the NVRAM magic word already read by the caller; its revision
 *       field selects which per-revision offset holds the
 *       edition/major/minor word.
 */
13329 static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
13331 u32 offset, major, minor, build;
13333 strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
13335 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
/* Pick the EDH word offset for this self-boot revision. */
13338 switch (val & TG3_EEPROM_SB_REVISION_MASK) {
13339 case TG3_EEPROM_SB_REVISION_0:
13340 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
13342 case TG3_EEPROM_SB_REVISION_2:
13343 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
13345 case TG3_EEPROM_SB_REVISION_3:
13346 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
13348 case TG3_EEPROM_SB_REVISION_4:
13349 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
13351 case TG3_EEPROM_SB_REVISION_5:
13352 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
13354 case TG3_EEPROM_SB_REVISION_6:
13355 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
13361 if (tg3_nvram_read(tp, offset, &val))
13364 build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
13365 TG3_EEPROM_SB_EDH_BLD_SHFT;
13366 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
13367 TG3_EEPROM_SB_EDH_MAJ_SHFT;
13368 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
/* Sanity bounds: minor is two decimal digits, build maps to 'a'-'z'. */
13370 if (minor > 99 || build > 26)
13373 offset = strlen(tp->fw_ver);
13374 snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
13375 " v%d.%02d", major, minor);
13378 offset = strlen(tp->fw_ver);
13379 if (offset < TG3_VER_SIZE - 1)
13380 tp->fw_ver[offset] = 'a' + build - 1;
/* Locate the ASF/management firmware image via the NVRAM directory,
 * validate it with tg3_fw_img_is_valid(), and append its 16-byte
 * version string to tp->fw_ver (prefixed with ", "), truncating at
 * TG3_VER_SIZE.
 */
13384 static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
13386 u32 val, offset, start;
/* Scan directory entries for the ASF-init image type. */
13389 for (offset = TG3_NVM_DIR_START;
13390 offset < TG3_NVM_DIR_END;
13391 offset += TG3_NVM_DIRENT_SIZE) {
13392 if (tg3_nvram_read(tp, offset, &val))
13395 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
13399 if (offset == TG3_NVM_DIR_END)
/* Pre-5705 parts use a fixed load address; later parts store it
 * in the word preceding the directory entry. */
13402 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
13403 start = 0x08000000;
13404 else if (tg3_nvram_read(tp, offset - 4, &start))
13407 if (tg3_nvram_read(tp, offset + 4, &offset) ||
13408 !tg3_fw_img_is_valid(tp, offset) ||
13409 tg3_nvram_read(tp, offset + 8, &val))
13412 offset += val - start;
13414 vlen = strlen(tp->fw_ver);
13416 tp->fw_ver[vlen++] = ',';
13417 tp->fw_ver[vlen++] = ' ';
/* Copy four big-endian words of version text, clamping the last
 * copy to the remaining space in tp->fw_ver. */
13419 for (i = 0; i < 4; i++) {
13421 if (tg3_nvram_read_be32(tp, offset, &v))
13424 offset += sizeof(v);
13426 if (vlen > TG3_VER_SIZE - sizeof(v)) {
13427 memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
13431 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
/* Append the APE (DASH/NCSI) firmware version to tp->fw_ver, e.g.
 * " DASH v1.2.3.4". Requires both the APE and ASF flags; bails out if
 * the APE shared-memory signature or FW_STATUS_READY check fails.
 * The NCSI-vs-DASH label selection lines are partially missing from
 * this extract.
 */
13436 static void __devinit tg3_read_dash_ver(struct tg3 *tp)
13442 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) ||
13443 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
13446 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
13447 if (apedata != APE_SEG_SIG_MAGIC)
13450 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
13451 if (!(apedata & APE_FW_STATUS_READY))
13454 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
13456 if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) {
13457 tp->tg3_flags3 |= TG3_FLG3_APE_HAS_NCSI;
13463 vlen = strlen(tp->fw_ver);
13465 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
13467 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
13468 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
13469 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
13470 (apedata & APE_FW_VERSION_BLDMSK));
/* Top-level firmware-version assembly for tp->fw_ver. Dispatches on
 * the NVRAM magic word to the bootcode / self-boot / hardware-self-boot
 * readers, then (when management firmware may be present) appends the
 * mgmt firmware version, and always NUL-terminates the buffer.
 */
13473 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
13476 bool vpd_vers = false;
/* tp->fw_ver may already hold a VPD-supplied vendor string. */
13478 if (tp->fw_ver[0] != 0)
13481 if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) {
13482 strcat(tp->fw_ver, "sb");
13486 if (tg3_nvram_read(tp, 0, &val))
13489 if (val == TG3_EEPROM_MAGIC)
13490 tg3_read_bc_ver(tp);
13491 else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
13492 tg3_read_sb_ver(tp, val);
13493 else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
13494 tg3_read_hwsb_ver(tp);
13498 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
13499 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) || vpd_vers)
13502 tg3_read_mgmtfw_ver(tp);
/* Guarantee NUL termination whatever the readers appended. */
13505 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
13508 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
/* Number of entries in the RX return ring for this chip family:
 * 5717-class (large producer ring) > jumbo-capable non-5780 > default.
 */
13510 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
13512 if (tp->tg3_flags3 & TG3_FLG3_LRG_PROD_RING_CAP)
13513 return TG3_RX_RET_MAX_SIZE_5717;
13514 else if ((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) &&
13515 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
13516 return TG3_RX_RET_MAX_SIZE_5700;
13518 return TG3_RX_RET_MAX_SIZE_5705;
/* Host bridges known to reorder PCI writes; matched against in
 * tg3_get_invariants() (the table's terminating entry and closing
 * brace are not visible in this extract).
 */
13521 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
13522 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
13523 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
13524 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
13528 static int __devinit tg3_get_invariants(struct tg3 *tp)
13531 u32 pci_state_reg, grc_misc_cfg;
13536 /* Force memory write invalidate off. If we leave it on,
13537 * then on 5700_BX chips we have to enable a workaround.
13538 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
13539 * to match the cacheline size. The Broadcom driver have this
13540 * workaround but turns MWI off all the times so never uses
13541 * it. This seems to suggest that the workaround is insufficient.
13543 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13544 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
13545 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13547 /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
13548 * has the register indirect write enable bit set before
13549 * we try to access any of the MMIO registers. It is also
13550 * critical that the PCI-X hw workaround situation is decided
13551 * before that as well.
13553 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13556 tp->pci_chip_rev_id = (misc_ctrl_reg >>
13557 MISC_HOST_CTRL_CHIPREV_SHIFT);
13558 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
13559 u32 prod_id_asic_rev;
13561 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
13562 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
13563 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
13564 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
13565 pci_read_config_dword(tp->pdev,
13566 TG3PCI_GEN2_PRODID_ASICREV,
13567 &prod_id_asic_rev);
13568 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
13569 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
13570 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
13571 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
13572 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
13573 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13574 pci_read_config_dword(tp->pdev,
13575 TG3PCI_GEN15_PRODID_ASICREV,
13576 &prod_id_asic_rev);
13578 pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
13579 &prod_id_asic_rev);
13581 tp->pci_chip_rev_id = prod_id_asic_rev;
13584 /* Wrong chip ID in 5752 A0. This code can be removed later
13585 * as A0 is not in production.
13587 if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
13588 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
13590 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
13591 * we need to disable memory and use config. cycles
13592 * only to access all registers. The 5702/03 chips
13593 * can mistakenly decode the special cycles from the
13594 * ICH chipsets as memory write cycles, causing corruption
13595 * of register and memory space. Only certain ICH bridges
13596 * will drive special cycles with non-zero data during the
13597 * address phase which can fall within the 5703's address
13598 * range. This is not an ICH bug as the PCI spec allows
13599 * non-zero address during special cycles. However, only
13600 * these ICH bridges are known to drive non-zero addresses
13601 * during special cycles.
13603 * Since special cycles do not cross PCI bridges, we only
13604 * enable this workaround if the 5703 is on the secondary
13605 * bus of these ICH bridges.
13607 if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
13608 (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
13609 static struct tg3_dev_id {
13613 } ich_chipsets[] = {
13614 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
13616 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
13618 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
13620 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
13624 struct tg3_dev_id *pci_id = &ich_chipsets[0];
13625 struct pci_dev *bridge = NULL;
13627 while (pci_id->vendor != 0) {
13628 bridge = pci_get_device(pci_id->vendor, pci_id->device,
13634 if (pci_id->rev != PCI_ANY_ID) {
13635 if (bridge->revision > pci_id->rev)
13638 if (bridge->subordinate &&
13639 (bridge->subordinate->number ==
13640 tp->pdev->bus->number)) {
13642 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
13643 pci_dev_put(bridge);
13649 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
13650 static struct tg3_dev_id {
13653 } bridge_chipsets[] = {
13654 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
13655 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
13658 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
13659 struct pci_dev *bridge = NULL;
13661 while (pci_id->vendor != 0) {
13662 bridge = pci_get_device(pci_id->vendor,
13669 if (bridge->subordinate &&
13670 (bridge->subordinate->number <=
13671 tp->pdev->bus->number) &&
13672 (bridge->subordinate->subordinate >=
13673 tp->pdev->bus->number)) {
13674 tp->tg3_flags3 |= TG3_FLG3_5701_DMA_BUG;
13675 pci_dev_put(bridge);
13681 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
13682 * DMA addresses > 40-bit. This bridge may have other additional
13683 * 57xx devices behind it in some 4-port NIC designs for example.
13684 * Any tg3 device found behind the bridge will also need the 40-bit
13687 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
13688 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
13689 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
13690 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
13691 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
13693 struct pci_dev *bridge = NULL;
13696 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
13697 PCI_DEVICE_ID_SERVERWORKS_EPB,
13699 if (bridge && bridge->subordinate &&
13700 (bridge->subordinate->number <=
13701 tp->pdev->bus->number) &&
13702 (bridge->subordinate->subordinate >=
13703 tp->pdev->bus->number)) {
13704 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
13705 pci_dev_put(bridge);
13711 /* Initialize misc host control in PCI block. */
13712 tp->misc_host_ctrl |= (misc_ctrl_reg &
13713 MISC_HOST_CTRL_CHIPREV);
13714 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13715 tp->misc_host_ctrl);
13717 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
13718 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
13719 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13720 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13721 tp->pdev_peer = tg3_find_peer(tp);
13723 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13724 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13725 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13726 tp->tg3_flags3 |= TG3_FLG3_5717_PLUS;
13728 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
13729 (tp->tg3_flags3 & TG3_FLG3_5717_PLUS))
13730 tp->tg3_flags3 |= TG3_FLG3_57765_PLUS;
13732 /* Intentionally exclude ASIC_REV_5906 */
13733 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13734 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13735 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13736 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13737 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13738 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13739 (tp->tg3_flags3 & TG3_FLG3_57765_PLUS))
13740 tp->tg3_flags3 |= TG3_FLG3_5755_PLUS;
13742 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
13743 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
13744 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
13745 (tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
13746 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
13747 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
13749 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
13750 (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
13751 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
13753 /* 5700 B0 chips do not support checksumming correctly due
13754 * to hardware bugs.
13756 if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
13757 u32 features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
13759 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
13760 features |= NETIF_F_IPV6_CSUM;
13761 tp->dev->features |= features;
13762 tp->dev->hw_features |= features;
13763 tp->dev->vlan_features |= features;
13766 /* Determine TSO capabilities */
13767 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
13768 ; /* Do nothing. HW bug. */
13769 else if (tp->tg3_flags3 & TG3_FLG3_57765_PLUS)
13770 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_3;
13771 else if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
13772 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13773 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
13774 else if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
13775 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG;
13776 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
13777 tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
13778 tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG;
13779 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13780 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13781 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
13782 tp->tg3_flags2 |= TG3_FLG2_TSO_BUG;
13783 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
13784 tp->fw_needed = FIRMWARE_TG3TSO5;
13786 tp->fw_needed = FIRMWARE_TG3TSO;
13791 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
13792 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI;
13793 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
13794 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
13795 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
13796 tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
13797 tp->pdev_peer == tp->pdev))
13798 tp->tg3_flags &= ~TG3_FLAG_SUPPORT_MSI;
13800 if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
13801 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13802 tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
13805 if (tp->tg3_flags3 & TG3_FLG3_57765_PLUS) {
13806 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSIX;
13807 tp->irq_max = TG3_IRQ_MAX_VECS;
13811 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13812 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13813 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13814 tp->tg3_flags3 |= TG3_FLG3_SHORT_DMA_BUG;
13815 else if (!(tp->tg3_flags3 & TG3_FLG3_5755_PLUS)) {
13816 tp->tg3_flags3 |= TG3_FLG3_4G_DMA_BNDRY_BUG;
13817 tp->tg3_flags3 |= TG3_FLG3_40BIT_DMA_LIMIT_BUG;
13820 if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)
13821 tp->tg3_flags3 |= TG3_FLG3_LRG_PROD_RING_CAP;
13823 if ((tp->tg3_flags3 & TG3_FLG3_57765_PLUS) &&
13824 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719)
13825 tp->tg3_flags3 |= TG3_FLG3_USE_JUMBO_BDFLAG;
13827 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
13828 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
13829 (tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG))
13830 tp->tg3_flags |= TG3_FLAG_JUMBO_CAPABLE;
13832 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
13835 tp->pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
13836 if (tp->pcie_cap != 0) {
13839 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
13841 tp->pcie_readrq = 4096;
13842 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13843 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13844 tp->pcie_readrq = 2048;
13846 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
13848 pci_read_config_word(tp->pdev,
13849 tp->pcie_cap + PCI_EXP_LNKCTL,
13851 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
13852 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13853 tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_2;
13854 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13855 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13856 tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
13857 tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
13858 tp->tg3_flags3 |= TG3_FLG3_CLKREQ_BUG;
13859 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
13860 tp->tg3_flags3 |= TG3_FLG3_L1PLLPD_EN;
13862 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
13863 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
13864 } else if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
13865 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
13866 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
13867 if (!tp->pcix_cap) {
13868 dev_err(&tp->pdev->dev,
13869 "Cannot find PCI-X capability, aborting\n");
13873 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
13874 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
13877 /* If we have an AMD 762 or VIA K8T800 chipset, write
13878 * reordering to the mailbox registers done by the host
13879 * controller can cause major troubles. We read back from
13880 * every mailbox register write to force the writes to be
13881 * posted to the chip in order.
13883 if (pci_dev_present(tg3_write_reorder_chipsets) &&
13884 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
13885 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
13887 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
13888 &tp->pci_cacheline_sz);
13889 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
13890 &tp->pci_lat_timer);
13891 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
13892 tp->pci_lat_timer < 64) {
13893 tp->pci_lat_timer = 64;
13894 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
13895 tp->pci_lat_timer);
13898 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
13899 /* 5700 BX chips need to have their TX producer index
13900 * mailboxes written twice to workaround a bug.
13902 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
13904 /* If we are in PCI-X mode, enable register write workaround.
13906 * The workaround is to use indirect register accesses
13907 * for all chip writes not to mailbox registers.
13909 if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
13912 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
13914 /* The chip can have its power management PCI config
13915 * space registers clobbered due to this bug.
13916 * So explicitly force the chip into D0 here.
13918 pci_read_config_dword(tp->pdev,
13919 tp->pm_cap + PCI_PM_CTRL,
13921 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
13922 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
13923 pci_write_config_dword(tp->pdev,
13924 tp->pm_cap + PCI_PM_CTRL,
13927 /* Also, force SERR#/PERR# in PCI command. */
13928 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13929 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
13930 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13934 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
13935 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
13936 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
13937 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
13939 /* Chip-specific fixup from Broadcom driver */
13940 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
13941 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
13942 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
13943 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
13946 /* Default fast path register access methods */
13947 tp->read32 = tg3_read32;
13948 tp->write32 = tg3_write32;
13949 tp->read32_mbox = tg3_read32;
13950 tp->write32_mbox = tg3_write32;
13951 tp->write32_tx_mbox = tg3_write32;
13952 tp->write32_rx_mbox = tg3_write32;
13954 /* Various workaround register access methods */
13955 if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
13956 tp->write32 = tg3_write_indirect_reg32;
13957 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
13958 ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
13959 tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
13961 * Back to back register writes can cause problems on these
13962 * chips, the workaround is to read back all reg writes
13963 * except those to mailbox regs.
13965 * See tg3_write_indirect_reg32().
13967 tp->write32 = tg3_write_flush_reg32;
13970 if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
13971 (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
13972 tp->write32_tx_mbox = tg3_write32_tx_mbox;
13973 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
13974 tp->write32_rx_mbox = tg3_write_flush_reg32;
13977 if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
13978 tp->read32 = tg3_read_indirect_reg32;
13979 tp->write32 = tg3_write_indirect_reg32;
13980 tp->read32_mbox = tg3_read_indirect_mbox;
13981 tp->write32_mbox = tg3_write_indirect_mbox;
13982 tp->write32_tx_mbox = tg3_write_indirect_mbox;
13983 tp->write32_rx_mbox = tg3_write_indirect_mbox;
13988 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13989 pci_cmd &= ~PCI_COMMAND_MEMORY;
13990 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13992 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13993 tp->read32_mbox = tg3_read32_mbox_5906;
13994 tp->write32_mbox = tg3_write32_mbox_5906;
13995 tp->write32_tx_mbox = tg3_write32_mbox_5906;
13996 tp->write32_rx_mbox = tg3_write32_mbox_5906;
13999 if (tp->write32 == tg3_write_indirect_reg32 ||
14000 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
14001 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14002 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
14003 tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
14005 /* Get eeprom hw config before calling tg3_set_power_state().
14006 * In particular, the TG3_FLG2_IS_NIC flag must be
14007 * determined before calling tg3_set_power_state() so that
14008 * we know whether or not to switch out of Vaux power.
14009 * When the flag is set, it means that GPIO1 is used for eeprom
14010 * write protect and also implies that it is a LOM where GPIOs
14011 * are not used to switch power.
14013 tg3_get_eeprom_hw_cfg(tp);
14015 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
14016 /* Allow reads and writes to the
14017 * APE register and memory space.
14019 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
14020 PCISTATE_ALLOW_APE_SHMEM_WR |
14021 PCISTATE_ALLOW_APE_PSPACE_WR;
14022 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
14026 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14027 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14028 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14029 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14030 (tp->tg3_flags3 & TG3_FLG3_57765_PLUS))
14031 tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT;
14033 /* Set up tp->grc_local_ctrl before calling tg3_power_up().
14034 * GPIO1 driven high will bring 5700's external PHY out of reset.
14035 * It is also used as eeprom write protect on LOMs.
14037 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
14038 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
14039 (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
14040 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
14041 GRC_LCLCTRL_GPIO_OUTPUT1);
14042 /* Unused GPIO3 must be driven as output on 5752 because there
14043 * are no pull-up resistors on unused GPIO pins.
14045 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
14046 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
14048 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14049 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14050 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
14051 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14053 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
14054 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
14055 /* Turn off the debug UART. */
14056 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14057 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
14058 /* Keep VMain power. */
14059 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
14060 GRC_LCLCTRL_GPIO_OUTPUT0;
14063 /* Force the chip into D0. */
14064 err = tg3_power_up(tp);
14066 dev_err(&tp->pdev->dev, "Transition to D0 failed\n");
14070 /* Derive initial jumbo mode from MTU assigned in
14071 * ether_setup() via the alloc_etherdev() call
14073 if (tp->dev->mtu > ETH_DATA_LEN &&
14074 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
14075 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
14077 /* Determine WakeOnLan speed to use. */
14078 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14079 tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
14080 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
14081 tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
14082 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
14084 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
14087 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14088 tp->phy_flags |= TG3_PHYFLG_IS_FET;
14090 /* A few boards don't want Ethernet@WireSpeed phy feature */
14091 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
14092 ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
14093 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
14094 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
14095 (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
14096 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
14097 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
14099 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
14100 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
14101 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
14102 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
14103 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
14105 if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
14106 !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
14107 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
14108 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
14109 !(tp->tg3_flags3 & TG3_FLG3_57765_PLUS)) {
14110 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14111 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
14112 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14113 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
14114 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
14115 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
14116 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
14117 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
14118 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
14120 tp->phy_flags |= TG3_PHYFLG_BER_BUG;
14123 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
14124 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
14125 tp->phy_otp = tg3_read_otp_phycfg(tp);
14126 if (tp->phy_otp == 0)
14127 tp->phy_otp = TG3_OTP_DEFAULT;
14130 if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)
14131 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
14133 tp->mi_mode = MAC_MI_MODE_BASE;
14135 tp->coalesce_mode = 0;
14136 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
14137 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
14138 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
14140 /* Set these bits to enable statistics workaround. */
14141 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14142 tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
14143 tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
14144 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
14145 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
14148 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14149 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
14150 tp->tg3_flags3 |= TG3_FLG3_USE_PHYLIB;
14152 err = tg3_mdio_init(tp);
14156 /* Initialize data/descriptor byte/word swapping. */
14157 val = tr32(GRC_MODE);
14158 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14159 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
14160 GRC_MODE_WORD_SWAP_B2HRX_DATA |
14161 GRC_MODE_B2HRX_ENABLE |
14162 GRC_MODE_HTX2B_ENABLE |
14163 GRC_MODE_HOST_STACKUP);
14165 val &= GRC_MODE_HOST_STACKUP;
14167 tw32(GRC_MODE, val | tp->grc_mode);
14169 tg3_switch_clocks(tp);
14171 /* Clear this out for sanity. */
14172 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
14174 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14176 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
14177 (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
14178 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
14180 if (chiprevid == CHIPREV_ID_5701_A0 ||
14181 chiprevid == CHIPREV_ID_5701_B0 ||
14182 chiprevid == CHIPREV_ID_5701_B2 ||
14183 chiprevid == CHIPREV_ID_5701_B5) {
14184 void __iomem *sram_base;
14186 /* Write some dummy words into the SRAM status block
14187 * area, see if it reads back correctly. If the return
14188 * value is bad, force enable the PCIX workaround.
14190 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
14192 writel(0x00000000, sram_base);
14193 writel(0x00000000, sram_base + 4);
14194 writel(0xffffffff, sram_base + 4);
14195 if (readl(sram_base) != 0x00000000)
14196 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
14201 tg3_nvram_init(tp);
14203 grc_misc_cfg = tr32(GRC_MISC_CFG);
14204 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
14206 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14207 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
14208 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
14209 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
14211 if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
14212 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
14213 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
14214 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
14215 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
14216 HOSTCC_MODE_CLRTICK_TXBD);
14218 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
14219 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14220 tp->misc_host_ctrl);
14223 /* Preserve the APE MAC_MODE bits */
14224 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
14225 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
14227 tp->mac_mode = TG3_DEF_MAC_MODE;
14229 /* these are limited to 10/100 only */
14230 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14231 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
14232 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14233 tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14234 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
14235 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
14236 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
14237 (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14238 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
14239 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
14240 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
14241 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
14242 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
14243 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
14244 (tp->phy_flags & TG3_PHYFLG_IS_FET))
14245 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
14247 err = tg3_phy_probe(tp);
14249 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
14250 /* ... but do not return immediately ... */
14255 tg3_read_fw_ver(tp);
14257 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
14258 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14260 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14261 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14263 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14266 /* 5700 {AX,BX} chips have a broken status block link
14267 * change bit implementation, so we must use the
14268 * status register in those cases.
14270 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14271 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
14273 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
14275 /* The led_ctrl is set during tg3_phy_probe, here we might
14276 * have to force the link status polling mechanism based
14277 * upon subsystem IDs.
14279 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
14280 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14281 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
14282 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14283 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
14286 /* For all SERDES we poll the MAC status register. */
14287 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14288 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
14290 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
14292 tp->rx_offset = NET_IP_ALIGN;
14293 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
14294 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14295 (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
14297 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
14298 tp->rx_copy_thresh = ~(u16)0;
14302 tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
14303 tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
14304 tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
14306 tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
14308 /* Increment the rx prod index on the rx std ring by at most
14309 * 8 for these chips to workaround hw errata.
14311 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14312 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14313 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
14314 tp->rx_std_max_post = 8;
14316 if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND)
14317 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
14318 PCIE_PWR_MGMT_L1_THRESH_MSK;
14323 #ifdef CONFIG_SPARC
/* Read the MAC address from the OpenFirmware device tree on SPARC:
 * the PROM exposes it as the "local-mac-address" property of this
 * PCI device's node.  On success both dev_addr and perm_addr are set.
 * NOTE(review): this excerpt is elided — the declaration of "len" and
 * the return statements are not visible here; presumably returns 0 on
 * success and nonzero when the property is absent — confirm upstream.
 */
14324 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
14326 struct net_device *dev = tp->dev;
14327 struct pci_dev *pdev = tp->pdev;
14328 struct device_node *dp = pci_device_to_OF_node(pdev);
14329 const unsigned char *addr;
14332 addr = of_get_property(dp, "local-mac-address", &len);
/* Only accept a property that is exactly one Ethernet address long. */
14333 if (addr && len == 6) {
14334 memcpy(dev->dev_addr, addr, 6);
14335 memcpy(dev->perm_addr, dev->dev_addr, 6);
/* Last-resort MAC address on SPARC: fall back to the system-wide
 * address stored in the machine's IDPROM (shared by all interfaces
 * that lack a per-device "local-mac-address" property).
 * NOTE(review): return statement elided in this excerpt.
 */
14341 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
14343 struct net_device *dev = tp->dev;
14345 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
14346 memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
/* Determine the device's MAC address, trying sources in order of
 * preference: SPARC OF property, the bootcode's SRAM mailbox, NVRAM
 * at a chip-specific offset, and finally the live MAC address
 * registers.  The chosen address is validated and copied to
 * perm_addr before returning.
 * NOTE(review): several lines (mac_offset assignments, early returns,
 * closing braces) are elided in this excerpt.
 */
14351 static int __devinit tg3_get_device_address(struct tg3 *tp)
14353 struct net_device *dev = tp->dev;
14354 u32 hi, lo, mac_offset;
14357 #ifdef CONFIG_SPARC
14358 if (!tg3_get_macaddr_sparc(tp))
/* Select where in NVRAM this function/port's MAC address lives.
 * Dual-MAC parts (5704, 5780-class) and multi-function 5717+ parts
 * store the second port's address at a different offset.
 */
14363 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
14364 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
14365 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
14367 if (tg3_nvram_lock(tp))
14368 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
14370 tg3_nvram_unlock(tp);
14371 } else if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) {
14372 if (PCI_FUNC(tp->pdev->devfn) & 1)
14374 if (PCI_FUNC(tp->pdev->devfn) > 1)
14375 mac_offset += 0x18c;
14376 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14379 /* First try to get it from MAC address mailbox. */
14380 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
/* 0x484b == "HK" signature written by bootcode when the mailbox
 * contents are valid.
 */
14381 if ((hi >> 16) == 0x484b) {
14382 dev->dev_addr[0] = (hi >> 8) & 0xff;
14383 dev->dev_addr[1] = (hi >> 0) & 0xff;
14385 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
14386 dev->dev_addr[2] = (lo >> 24) & 0xff;
14387 dev->dev_addr[3] = (lo >> 16) & 0xff;
14388 dev->dev_addr[4] = (lo >> 8) & 0xff;
14389 dev->dev_addr[5] = (lo >> 0) & 0xff;
14391 /* Some old bootcode may report a 0 MAC address in SRAM */
14392 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
14395 /* Next, try NVRAM. */
14396 if (!(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) &&
14397 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
14398 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
/* hi holds the first 2 address bytes in its low half; lo the rest. */
14399 memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
14400 memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
14402 /* Finally just fetch it out of the MAC control regs. */
14404 hi = tr32(MAC_ADDR_0_HIGH);
14405 lo = tr32(MAC_ADDR_0_LOW);
14407 dev->dev_addr[5] = lo & 0xff;
14408 dev->dev_addr[4] = (lo >> 8) & 0xff;
14409 dev->dev_addr[3] = (lo >> 16) & 0xff;
14410 dev->dev_addr[2] = (lo >> 24) & 0xff;
14411 dev->dev_addr[1] = hi & 0xff;
14412 dev->dev_addr[0] = (hi >> 8) & 0xff;
/* If everything above yielded an invalid address, SPARC systems can
 * still fall back to the IDPROM default.
 */
14416 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
14417 #ifdef CONFIG_SPARC
14418 if (!tg3_get_default_macaddr_sparc(tp))
14423 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len)
14427 #define BOUNDARY_SINGLE_CACHELINE 1
14428 #define BOUNDARY_MULTI_CACHELINE 2
/* Compute the DMA read/write boundary bits for TG3PCI_DMA_RW_CTRL.
 * "val" is the current DMA_RW_CTRL value; the returned value has the
 * boundary fields set according to the host cache line size, the bus
 * type (PCI / PCI-X / PCI Express), and an architecture-dependent
 * "goal" (single vs. multiple cache lines per burst).
 * NOTE(review): this excerpt is elided — the declarations of "goal"
 * and "byte", several case labels/breaks, and the final return are
 * not visible here.
 */
14430 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
14432 int cacheline_size;
14436 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
/* A zero cache-line-size register means "unknown"; assume the
 * largest supported size.  Register units are 32-bit words.
 */
14438 cacheline_size = 1024;
14440 cacheline_size = (int) byte * 4;
14442 /* On 5703 and later chips, the boundary bits have no
14445 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14446 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14447 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
/* Per-architecture burst policy for the PCI host bridges these
 * platforms ship with.
 */
14450 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
14451 goal = BOUNDARY_MULTI_CACHELINE;
14453 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
14454 goal = BOUNDARY_SINGLE_CACHELINE;
/* 57765+ devices have a single cache-alignment enable bit instead of
 * the per-size boundary encodings below.
 */
14460 if (tp->tg3_flags3 & TG3_FLG3_57765_PLUS) {
14461 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
14468 /* PCI controllers on most RISC systems tend to disconnect
14469 * when a device tries to burst across a cache-line boundary.
14470 * Therefore, letting tg3 do so just wastes PCI bandwidth.
14472 * Unfortunately, for PCI-E there are only limited
14473 * write-side controls for this, and thus for reads
14474 * we will still get the disconnects. We'll also waste
14475 * these PCI cycles for both read and write for chips
14476 * other than 5700 and 5701 which do not implement the
/* PCI-X: boundary encodings differ from plain PCI. */
14479 if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
14480 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
14481 switch (cacheline_size) {
14486 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14487 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
14488 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
14490 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14491 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14496 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
14497 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
14501 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14502 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
/* PCI Express: only the write boundary is controllable. */
14505 } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
14506 switch (cacheline_size) {
14510 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14511 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14512 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
14518 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14519 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
/* Conventional PCI: pick the boundary matching the cache line. */
14523 switch (cacheline_size) {
14525 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14526 val |= (DMA_RWCTRL_READ_BNDRY_16 |
14527 DMA_RWCTRL_WRITE_BNDRY_16);
14532 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14533 val |= (DMA_RWCTRL_READ_BNDRY_32 |
14534 DMA_RWCTRL_WRITE_BNDRY_32);
14539 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14540 val |= (DMA_RWCTRL_READ_BNDRY_64 |
14541 DMA_RWCTRL_WRITE_BNDRY_64);
14546 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14547 val |= (DMA_RWCTRL_READ_BNDRY_128 |
14548 DMA_RWCTRL_WRITE_BNDRY_128);
14553 val |= (DMA_RWCTRL_READ_BNDRY_256 |
14554 DMA_RWCTRL_WRITE_BNDRY_256);
14557 val |= (DMA_RWCTRL_READ_BNDRY_512 |
14558 DMA_RWCTRL_WRITE_BNDRY_512);
14562 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
14563 DMA_RWCTRL_WRITE_BNDRY_1024);
/* Run one DMA transfer through the chip's internal DMA engine for the
 * power-on DMA sanity test.  Builds an internal buffer descriptor for
 * "buf"/"buf_dma" in NIC SRAM (via PCI config-space memory-window
 * accesses), enqueues it on the read or write DMA FTQ depending on
 * "to_device", and polls the completion FIFO for the descriptor.
 * NOTE(review): loop variable declarations, udelay between polls, and
 * the final return/ret handling are elided in this excerpt.
 */
14572 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
14574 struct tg3_internal_buffer_desc test_desc;
14575 u32 sram_dma_descs;
14578 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
/* Quiesce the DMA engines and completion FIFOs before the test. */
14580 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
14581 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
14582 tw32(RDMAC_STATUS, 0);
14583 tw32(WDMAC_STATUS, 0);
14585 tw32(BUFMGR_MODE, 0);
14586 tw32(FTQ_RESET, 0);
/* Describe the host buffer; 0x2100 is the NIC-side SRAM mbuf offset
 * used as the transfer target/source.
 */
14588 test_desc.addr_hi = ((u64) buf_dma) >> 32;
14589 test_desc.addr_lo = buf_dma & 0xffffffff;
14590 test_desc.nic_mbuf = 0x00002100;
14591 test_desc.len = size;
14594 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
14595 * the *second* time the tg3 driver was getting loaded after an
14598 * Broadcom tells me:
14599 * ...the DMA engine is connected to the GRC block and a DMA
14600 * reset may affect the GRC block in some unpredictable way...
14601 * The behavior of resets to individual blocks has not been tested.
14603 * Broadcom noted the GRC reset will also reset all sub-components.
/* Completion queue / source queue IDs differ for the read (host ->
 * chip) and write (chip -> host) DMA engines.
 */
14606 test_desc.cqid_sqid = (13 << 8) | 2;
14608 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
14611 test_desc.cqid_sqid = (16 << 8) | 7;
14613 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
14616 test_desc.flags = 0x00000005;
/* Copy the descriptor word-by-word into NIC SRAM through the PCI
 * config-space memory window.
 */
14618 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
14621 val = *(((u32 *)&test_desc) + i);
14622 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
14623 sram_dma_descs + (i * sizeof(u32)));
14624 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
14626 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
/* Kick the appropriate DMA FTQ with the descriptor's SRAM address. */
14629 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
14631 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
/* Poll (bounded) for the descriptor to appear on the completion FIFO. */
14634 for (i = 0; i < 40; i++) {
14638 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
14640 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
14641 if ((val & 0xffff) == sram_dma_descs) {
/* Size of the host buffer used by tg3_test_dma(). */
14652 #define TEST_BUFFER_SIZE 0x2000
/* Host bridges known to expose the 5700/5701 write-DMA bug even when
 * the DMA test below passes (see tg3_test_dma()).
 */
14654 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
14655 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
/* Probe-time DMA sanity test.  Computes an initial DMA_RW_CTRL value
 * (watermarks by bus type/ASIC plus boundary bits from
 * tg3_calc_dma_bndry()), then on 5700/5701 performs a write/verify/
 * read-back loop against a DMA-coherent buffer with the maximum write
 * burst size to detect the known write-DMA bug; on corruption it
 * retries with a 16-byte write boundary.
 * NOTE(review): variable declarations (ret, i, p), gotos/labels, and
 * some error-path lines are elided in this excerpt.
 */
14659 static int __devinit tg3_test_dma(struct tg3 *tp)
14661 dma_addr_t buf_dma;
14662 u32 *buf, saved_dma_rwctrl;
14665 buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
14666 &buf_dma, GFP_KERNEL);
/* Baseline PCI read/write command codes, then boundary bits. */
14672 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
14673 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
14675 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
14677 if (tp->tg3_flags3 & TG3_FLG3_57765_PLUS)
/* Bus-type / ASIC specific DMA watermark settings. */
14680 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
14681 /* DMA read watermark not used on PCIE */
14682 tp->dma_rwctrl |= 0x00180000;
14683 } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
14684 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
14685 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
14686 tp->dma_rwctrl |= 0x003f0000;
14688 tp->dma_rwctrl |= 0x003f000f;
14690 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
14691 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
14692 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
14693 u32 read_water = 0x7;
14695 /* If the 5704 is behind the EPB bridge, we can
14696 * do the less restrictive ONE_DMA workaround for
14697 * better performance.
14699 if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
14700 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
14701 tp->dma_rwctrl |= 0x8000;
14702 else if (ccval == 0x6 || ccval == 0x7)
14703 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
14705 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
14707 /* Set bit 23 to enable PCIX hw bug fix */
14709 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
14710 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
14712 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
14713 /* 5780 always in PCIX mode */
14714 tp->dma_rwctrl |= 0x00144000;
14715 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
14716 /* 5714 always in PCIX mode */
14717 tp->dma_rwctrl |= 0x00148000;
14719 tp->dma_rwctrl |= 0x001b000f;
/* 5703/5704: low nibble of dma_rwctrl must be cleared. */
14723 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
14724 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
14725 tp->dma_rwctrl &= 0xfffffff0;
14727 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14728 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
14729 /* Remove this if it causes problems for some boards. */
14730 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
14732 /* On 5700/5701 chips, we need to set this bit.
14733 * Otherwise the chip will issue cacheline transactions
14734 * to streamable DMA memory with not all the byte
14735 * enables turned on. This is an error on several
14736 * RISC PCI controllers, in particular sparc64.
14738 * On 5703/5704 chips, this bit has been reassigned
14739 * a different meaning. In particular, it is used
14740 * on those chips to enable a PCI-X workaround.
14742 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
14745 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14748 /* Unneeded, already done by tg3_get_invariants. */
14749 tg3_switch_clocks(tp);
/* Only 5700/5701 need the actual transfer test below. */
14752 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14753 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
14756 /* It is best to perform DMA test with maximum write burst size
14757 * to expose the 5700/5701 write DMA bug.
14759 saved_dma_rwctrl = tp->dma_rwctrl;
14760 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14761 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
/* Fill the buffer with a known pattern (fill elided in excerpt). */
14766 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
14769 /* Send the buffer to the chip. */
14770 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
14772 dev_err(&tp->pdev->dev,
14773 "%s: Buffer write failed. err = %d\n",
14779 /* validate data reached card RAM correctly. */
14780 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
14782 tg3_read_mem(tp, 0x2100 + (i*4), &val);
14783 if (le32_to_cpu(val) != p[i]) {
14784 dev_err(&tp->pdev->dev,
14785 "%s: Buffer corrupted on device! "
14786 "(%d != %d)\n", __func__, val, i);
14787 /* ret = -ENODEV here? */
14792 /* Now read it back. */
14793 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
14795 dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
14796 "err = %d\n", __func__, ret);
/* Compare what came back with the original pattern. */
14801 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
/* Mismatch: retry with the conservative 16-byte write boundary
 * before declaring failure.
 */
14805 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
14806 DMA_RWCTRL_WRITE_BNDRY_16) {
14807 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14808 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
14809 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14812 dev_err(&tp->pdev->dev,
14813 "%s: Buffer corrupted on read back! "
14814 "(%d != %d)\n", __func__, p[i], i);
/* Loop ran to completion => whole buffer verified. */
14820 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
14826 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
14827 DMA_RWCTRL_WRITE_BNDRY_16) {
14828 /* DMA test passed without adjusting DMA boundary,
14829 * now look for chipsets that are known to expose the
14830 * DMA bug without failing the test.
14832 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
14833 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14834 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
14836 /* Safe to use the calculated DMA boundary. */
14837 tp->dma_rwctrl = saved_dma_rwctrl;
14840 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14844 dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
14849 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
14851 if (tp->tg3_flags3 & TG3_FLG3_57765_PLUS) {
14852 tp->bufmgr_config.mbuf_read_dma_low_water =
14853 DEFAULT_MB_RDMA_LOW_WATER_5705;
14854 tp->bufmgr_config.mbuf_mac_rx_low_water =
14855 DEFAULT_MB_MACRX_LOW_WATER_57765;
14856 tp->bufmgr_config.mbuf_high_water =
14857 DEFAULT_MB_HIGH_WATER_57765;
14859 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14860 DEFAULT_MB_RDMA_LOW_WATER_5705;
14861 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14862 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
14863 tp->bufmgr_config.mbuf_high_water_jumbo =
14864 DEFAULT_MB_HIGH_WATER_JUMBO_57765;
14865 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
14866 tp->bufmgr_config.mbuf_read_dma_low_water =
14867 DEFAULT_MB_RDMA_LOW_WATER_5705;
14868 tp->bufmgr_config.mbuf_mac_rx_low_water =
14869 DEFAULT_MB_MACRX_LOW_WATER_5705;
14870 tp->bufmgr_config.mbuf_high_water =
14871 DEFAULT_MB_HIGH_WATER_5705;
14872 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14873 tp->bufmgr_config.mbuf_mac_rx_low_water =
14874 DEFAULT_MB_MACRX_LOW_WATER_5906;
14875 tp->bufmgr_config.mbuf_high_water =
14876 DEFAULT_MB_HIGH_WATER_5906;
14879 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14880 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
14881 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14882 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
14883 tp->bufmgr_config.mbuf_high_water_jumbo =
14884 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
14886 tp->bufmgr_config.mbuf_read_dma_low_water =
14887 DEFAULT_MB_RDMA_LOW_WATER;
14888 tp->bufmgr_config.mbuf_mac_rx_low_water =
14889 DEFAULT_MB_MACRX_LOW_WATER;
14890 tp->bufmgr_config.mbuf_high_water =
14891 DEFAULT_MB_HIGH_WATER;
14893 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14894 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
14895 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14896 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
14897 tp->bufmgr_config.mbuf_high_water_jumbo =
14898 DEFAULT_MB_HIGH_WATER_JUMBO;
14901 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
14902 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
14905 static char * __devinit tg3_phy_string(struct tg3 *tp)
14907 switch (tp->phy_id & TG3_PHY_ID_MASK) {
14908 case TG3_PHY_ID_BCM5400: return "5400";
14909 case TG3_PHY_ID_BCM5401: return "5401";
14910 case TG3_PHY_ID_BCM5411: return "5411";
14911 case TG3_PHY_ID_BCM5701: return "5701";
14912 case TG3_PHY_ID_BCM5703: return "5703";
14913 case TG3_PHY_ID_BCM5704: return "5704";
14914 case TG3_PHY_ID_BCM5705: return "5705";
14915 case TG3_PHY_ID_BCM5750: return "5750";
14916 case TG3_PHY_ID_BCM5752: return "5752";
14917 case TG3_PHY_ID_BCM5714: return "5714";
14918 case TG3_PHY_ID_BCM5780: return "5780";
14919 case TG3_PHY_ID_BCM5755: return "5755";
14920 case TG3_PHY_ID_BCM5787: return "5787";
14921 case TG3_PHY_ID_BCM5784: return "5784";
14922 case TG3_PHY_ID_BCM5756: return "5722/5756";
14923 case TG3_PHY_ID_BCM5906: return "5906";
14924 case TG3_PHY_ID_BCM5761: return "5761";
14925 case TG3_PHY_ID_BCM5718C: return "5718C";
14926 case TG3_PHY_ID_BCM5718S: return "5718S";
14927 case TG3_PHY_ID_BCM57765: return "57765";
14928 case TG3_PHY_ID_BCM5719C: return "5719C";
14929 case TG3_PHY_ID_BCM5720C: return "5720C";
14930 case TG3_PHY_ID_BCM8002: return "8002/serdes";
14931 case 0: return "serdes";
14932 default: return "unknown";
14936 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
14938 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
14939 strcpy(str, "PCI Express");
14941 } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
14942 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
14944 strcpy(str, "PCIX:");
14946 if ((clock_ctrl == 7) ||
14947 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
14948 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
14949 strcat(str, "133MHz");
14950 else if (clock_ctrl == 0)
14951 strcat(str, "33MHz");
14952 else if (clock_ctrl == 2)
14953 strcat(str, "50MHz");
14954 else if (clock_ctrl == 4)
14955 strcat(str, "66MHz");
14956 else if (clock_ctrl == 6)
14957 strcat(str, "100MHz");
14959 strcpy(str, "PCI:");
14960 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
14961 strcat(str, "66MHz");
14963 strcat(str, "33MHz");
14965 if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
14966 strcat(str, ":32-bit");
14968 strcat(str, ":64-bit");
14972 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
14974 struct pci_dev *peer;
14975 unsigned int func, devnr = tp->pdev->devfn & ~7;
14977 for (func = 0; func < 8; func++) {
14978 peer = pci_get_slot(tp->pdev->bus, devnr | func);
14979 if (peer && peer != tp->pdev)
14983 /* 5704 can be configured in single-port mode, set peer to
14984 * tp->pdev in that case.
14992 * We don't need to keep the refcount elevated; there's no way
14993 * to remove one half of this device without removing the other
15000 static void __devinit tg3_init_coal(struct tg3 *tp)
15002 struct ethtool_coalesce *ec = &tp->coal;
15004 memset(ec, 0, sizeof(*ec));
15005 ec->cmd = ETHTOOL_GCOALESCE;
15006 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
15007 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
15008 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
15009 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
15010 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
15011 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
15012 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
15013 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
15014 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
15016 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
15017 HOSTCC_MODE_CLRTICK_TXBD)) {
15018 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
15019 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
15020 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
15021 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
15024 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
15025 ec->rx_coalesce_usecs_irq = 0;
15026 ec->tx_coalesce_usecs_irq = 0;
15027 ec->stats_block_coalesce_usecs = 0;
15031 static const struct net_device_ops tg3_netdev_ops = {
15032 .ndo_open = tg3_open,
15033 .ndo_stop = tg3_close,
15034 .ndo_start_xmit = tg3_start_xmit,
15035 .ndo_get_stats64 = tg3_get_stats64,
15036 .ndo_validate_addr = eth_validate_addr,
15037 .ndo_set_multicast_list = tg3_set_rx_mode,
15038 .ndo_set_mac_address = tg3_set_mac_addr,
15039 .ndo_do_ioctl = tg3_ioctl,
15040 .ndo_tx_timeout = tg3_tx_timeout,
15041 .ndo_change_mtu = tg3_change_mtu,
15042 .ndo_fix_features = tg3_fix_features,
15043 #ifdef CONFIG_NET_POLL_CONTROLLER
15044 .ndo_poll_controller = tg3_poll_controller,
15048 static const struct net_device_ops tg3_netdev_ops_dma_bug = {
15049 .ndo_open = tg3_open,
15050 .ndo_stop = tg3_close,
15051 .ndo_start_xmit = tg3_start_xmit_dma_bug,
15052 .ndo_get_stats64 = tg3_get_stats64,
15053 .ndo_validate_addr = eth_validate_addr,
15054 .ndo_set_multicast_list = tg3_set_rx_mode,
15055 .ndo_set_mac_address = tg3_set_mac_addr,
15056 .ndo_do_ioctl = tg3_ioctl,
15057 .ndo_tx_timeout = tg3_tx_timeout,
15058 .ndo_change_mtu = tg3_change_mtu,
15059 #ifdef CONFIG_NET_POLL_CONTROLLER
15060 .ndo_poll_controller = tg3_poll_controller,
15064 static int __devinit tg3_init_one(struct pci_dev *pdev,
15065 const struct pci_device_id *ent)
15067 struct net_device *dev;
15069 int i, err, pm_cap;
15070 u32 sndmbx, rcvmbx, intmbx;
15072 u64 dma_mask, persist_dma_mask;
15073 u32 hw_features = 0;
15075 printk_once(KERN_INFO "%s\n", version);
15077 err = pci_enable_device(pdev);
15079 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
15083 err = pci_request_regions(pdev, DRV_MODULE_NAME);
15085 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
15086 goto err_out_disable_pdev;
15089 pci_set_master(pdev);
15091 /* Find power-management capability. */
15092 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
15094 dev_err(&pdev->dev,
15095 "Cannot find Power Management capability, aborting\n");
15097 goto err_out_free_res;
15100 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
15102 dev_err(&pdev->dev, "Etherdev alloc failed, aborting\n");
15104 goto err_out_free_res;
15107 SET_NETDEV_DEV(dev, &pdev->dev);
15109 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
15111 tp = netdev_priv(dev);
15114 tp->pm_cap = pm_cap;
15115 tp->rx_mode = TG3_DEF_RX_MODE;
15116 tp->tx_mode = TG3_DEF_TX_MODE;
15119 tp->msg_enable = tg3_debug;
15121 tp->msg_enable = TG3_DEF_MSG_ENABLE;
15123 /* The word/byte swap controls here control register access byte
15124 * swapping. DMA data byte swapping is controlled in the GRC_MODE
15127 tp->misc_host_ctrl =
15128 MISC_HOST_CTRL_MASK_PCI_INT |
15129 MISC_HOST_CTRL_WORD_SWAP |
15130 MISC_HOST_CTRL_INDIR_ACCESS |
15131 MISC_HOST_CTRL_PCISTATE_RW;
15133 /* The NONFRM (non-frame) byte/word swap controls take effect
15134 * on descriptor entries, anything which isn't packet data.
15136 * The StrongARM chips on the board (one for tx, one for rx)
15137 * are running in big-endian mode.
15139 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
15140 GRC_MODE_WSWAP_NONFRM_DATA);
15141 #ifdef __BIG_ENDIAN
15142 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
15144 spin_lock_init(&tp->lock);
15145 spin_lock_init(&tp->indirect_lock);
15146 INIT_WORK(&tp->reset_task, tg3_reset_task);
15148 tp->regs = pci_ioremap_bar(pdev, BAR_0);
15150 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
15152 goto err_out_free_dev;
15155 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
15156 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
15158 dev->ethtool_ops = &tg3_ethtool_ops;
15159 dev->watchdog_timeo = TG3_TX_TIMEOUT;
15160 dev->irq = pdev->irq;
15162 err = tg3_get_invariants(tp);
15164 dev_err(&pdev->dev,
15165 "Problem fetching invariants of chip, aborting\n");
15166 goto err_out_iounmap;
15169 if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) &&
15170 !(tp->tg3_flags3 & TG3_FLG3_5717_PLUS))
15171 dev->netdev_ops = &tg3_netdev_ops;
15173 dev->netdev_ops = &tg3_netdev_ops_dma_bug;
15176 /* The EPB bridge inside 5714, 5715, and 5780 and any
15177 * device behind the EPB cannot support DMA addresses > 40-bit.
15178 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
15179 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
15180 * do DMA address check in tg3_start_xmit().
15182 if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
15183 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
15184 else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
15185 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
15186 #ifdef CONFIG_HIGHMEM
15187 dma_mask = DMA_BIT_MASK(64);
15190 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
15192 /* Configure DMA attributes. */
15193 if (dma_mask > DMA_BIT_MASK(32)) {
15194 err = pci_set_dma_mask(pdev, dma_mask);
15196 dev->features |= NETIF_F_HIGHDMA;
15197 err = pci_set_consistent_dma_mask(pdev,
15200 dev_err(&pdev->dev, "Unable to obtain 64 bit "
15201 "DMA for consistent allocations\n");
15202 goto err_out_iounmap;
15206 if (err || dma_mask == DMA_BIT_MASK(32)) {
15207 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
15209 dev_err(&pdev->dev,
15210 "No usable DMA configuration, aborting\n");
15211 goto err_out_iounmap;
15215 tg3_init_bufmgr_config(tp);
15217 /* Selectively allow TSO based on operating conditions */
15218 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
15219 (tp->fw_needed && !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)))
15220 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
15222 tp->tg3_flags2 &= ~(TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG);
15223 tp->fw_needed = NULL;
15226 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
15227 tp->fw_needed = FIRMWARE_TG3;
15229 /* TSO is on by default on chips that support hardware TSO.
15230 * Firmware TSO on older chips gives lower performance, so it
15231 * is off by default, but can be enabled using ethtool.
15233 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) &&
15234 (dev->features & NETIF_F_IP_CSUM))
15235 hw_features |= NETIF_F_TSO;
15236 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) ||
15237 (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3)) {
15238 if (dev->features & NETIF_F_IPV6_CSUM)
15239 hw_features |= NETIF_F_TSO6;
15240 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) ||
15241 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
15242 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
15243 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
15244 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
15245 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
15246 hw_features |= NETIF_F_TSO_ECN;
15249 dev->hw_features |= hw_features;
15250 dev->features |= hw_features;
15251 dev->vlan_features |= hw_features;
15253 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
15254 !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
15255 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
15256 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
15257 tp->rx_pending = 63;
15260 err = tg3_get_device_address(tp);
15262 dev_err(&pdev->dev,
15263 "Could not obtain valid ethernet address, aborting\n");
15264 goto err_out_iounmap;
15267 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
15268 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
15269 if (!tp->aperegs) {
15270 dev_err(&pdev->dev,
15271 "Cannot map APE registers, aborting\n");
15273 goto err_out_iounmap;
15276 tg3_ape_lock_init(tp);
15278 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
15279 tg3_read_dash_ver(tp);
15283 * Reset chip in case UNDI or EFI driver did not shutdown
15284 * DMA self test will enable WDMAC and we'll see (spurious)
15285 * pending DMA on the PCI bus at that point.
15287 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
15288 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
15289 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
15290 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15293 err = tg3_test_dma(tp);
15295 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
15296 goto err_out_apeunmap;
15299 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
15300 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
15301 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
15302 for (i = 0; i < tp->irq_max; i++) {
15303 struct tg3_napi *tnapi = &tp->napi[i];
15306 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
15308 tnapi->int_mbox = intmbx;
15314 tnapi->consmbox = rcvmbx;
15315 tnapi->prodmbox = sndmbx;
15318 tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
15320 tnapi->coal_now = HOSTCC_MODE_NOW;
15322 if (!(tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX))
15326 * If we support MSIX, we'll be using RSS. If we're using
15327 * RSS, the first vector only handles link interrupts and the
15328 * remaining vectors handle rx and tx interrupts. Reuse the
15329 * mailbox values for the next iteration. The values we setup
15330 * above are still useful for the single vectored mode.
15345 pci_set_drvdata(pdev, dev);
15347 err = register_netdev(dev);
15349 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
15350 goto err_out_apeunmap;
15353 netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
15354 tp->board_part_number,
15355 tp->pci_chip_rev_id,
15356 tg3_bus_string(tp, str),
15359 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
15360 struct phy_device *phydev;
15361 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
15363 "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
15364 phydev->drv->name, dev_name(&phydev->dev));
15368 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
15369 ethtype = "10/100Base-TX";
15370 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
15371 ethtype = "1000Base-SX";
15373 ethtype = "10/100/1000Base-T";
15375 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
15376 "(WireSpeed[%d], EEE[%d])\n",
15377 tg3_phy_string(tp), ethtype,
15378 (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
15379 (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
15382 netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
15383 (dev->features & NETIF_F_RXCSUM) != 0,
15384 (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
15385 (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
15386 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
15387 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
15388 netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
15390 pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
15391 ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
15393 pci_save_state(pdev);
15399 iounmap(tp->aperegs);
15400 tp->aperegs = NULL;
15413 pci_release_regions(pdev);
15415 err_out_disable_pdev:
15416 pci_disable_device(pdev);
15417 pci_set_drvdata(pdev, NULL);
15421 static void __devexit tg3_remove_one(struct pci_dev *pdev)
15423 struct net_device *dev = pci_get_drvdata(pdev);
15426 struct tg3 *tp = netdev_priv(dev);
15429 release_firmware(tp->fw);
15431 cancel_work_sync(&tp->reset_task);
15433 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
15438 unregister_netdev(dev);
15440 iounmap(tp->aperegs);
15441 tp->aperegs = NULL;
15448 pci_release_regions(pdev);
15449 pci_disable_device(pdev);
15450 pci_set_drvdata(pdev, NULL);
15454 #ifdef CONFIG_PM_SLEEP
15455 static int tg3_suspend(struct device *device)
15457 struct pci_dev *pdev = to_pci_dev(device);
15458 struct net_device *dev = pci_get_drvdata(pdev);
15459 struct tg3 *tp = netdev_priv(dev);
15462 if (!netif_running(dev))
15465 flush_work_sync(&tp->reset_task);
15467 tg3_netif_stop(tp);
15469 del_timer_sync(&tp->timer);
15471 tg3_full_lock(tp, 1);
15472 tg3_disable_ints(tp);
15473 tg3_full_unlock(tp);
15475 netif_device_detach(dev);
15477 tg3_full_lock(tp, 0);
15478 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15479 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
15480 tg3_full_unlock(tp);
15482 err = tg3_power_down_prepare(tp);
15486 tg3_full_lock(tp, 0);
15488 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
15489 err2 = tg3_restart_hw(tp, 1);
15493 tp->timer.expires = jiffies + tp->timer_offset;
15494 add_timer(&tp->timer);
15496 netif_device_attach(dev);
15497 tg3_netif_start(tp);
15500 tg3_full_unlock(tp);
15509 static int tg3_resume(struct device *device)
15511 struct pci_dev *pdev = to_pci_dev(device);
15512 struct net_device *dev = pci_get_drvdata(pdev);
15513 struct tg3 *tp = netdev_priv(dev);
15516 if (!netif_running(dev))
15519 netif_device_attach(dev);
15521 tg3_full_lock(tp, 0);
15523 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
15524 err = tg3_restart_hw(tp, 1);
15528 tp->timer.expires = jiffies + tp->timer_offset;
15529 add_timer(&tp->timer);
15531 tg3_netif_start(tp);
15534 tg3_full_unlock(tp);
15542 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
15543 #define TG3_PM_OPS (&tg3_pm_ops)
15547 #define TG3_PM_OPS NULL
15549 #endif /* CONFIG_PM_SLEEP */
15552 * tg3_io_error_detected - called when PCI error is detected
15553 * @pdev: Pointer to PCI device
15554 * @state: The current pci connection state
15556 * This function is called after a PCI bus error affecting
15557 * this device has been detected.
15559 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
15560 pci_channel_state_t state)
15562 struct net_device *netdev = pci_get_drvdata(pdev);
15563 struct tg3 *tp = netdev_priv(netdev);
15564 pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
15566 netdev_info(netdev, "PCI I/O error detected\n");
15570 if (!netif_running(netdev))
15575 tg3_netif_stop(tp);
15577 del_timer_sync(&tp->timer);
15578 tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
15580 /* Want to make sure that the reset task doesn't run */
15581 cancel_work_sync(&tp->reset_task);
15582 tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
15583 tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
15585 netif_device_detach(netdev);
15587 /* Clean up software state, even if MMIO is blocked */
15588 tg3_full_lock(tp, 0);
15589 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
15590 tg3_full_unlock(tp);
15593 if (state == pci_channel_io_perm_failure)
15594 err = PCI_ERS_RESULT_DISCONNECT;
15596 pci_disable_device(pdev);
15604 * tg3_io_slot_reset - called after the pci bus has been reset.
15605 * @pdev: Pointer to PCI device
15607 * Restart the card from scratch, as if from a cold-boot.
15608 * At this point, the card has exprienced a hard reset,
15609 * followed by fixups by BIOS, and has its config space
15610 * set up identically to what it was at cold boot.
15612 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
15614 struct net_device *netdev = pci_get_drvdata(pdev);
15615 struct tg3 *tp = netdev_priv(netdev);
15616 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
15621 if (pci_enable_device(pdev)) {
15622 netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
15626 pci_set_master(pdev);
15627 pci_restore_state(pdev);
15628 pci_save_state(pdev);
15630 if (!netif_running(netdev)) {
15631 rc = PCI_ERS_RESULT_RECOVERED;
15635 err = tg3_power_up(tp);
15637 netdev_err(netdev, "Failed to restore register access.\n");
15641 rc = PCI_ERS_RESULT_RECOVERED;
15650 * tg3_io_resume - called when traffic can start flowing again.
15651 * @pdev: Pointer to PCI device
15653 * This callback is called when the error recovery driver tells
15654 * us that its OK to resume normal operation.
15656 static void tg3_io_resume(struct pci_dev *pdev)
15658 struct net_device *netdev = pci_get_drvdata(pdev);
15659 struct tg3 *tp = netdev_priv(netdev);
15664 if (!netif_running(netdev))
15667 tg3_full_lock(tp, 0);
15668 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
15669 err = tg3_restart_hw(tp, 1);
15670 tg3_full_unlock(tp);
15672 netdev_err(netdev, "Cannot restart hardware after reset.\n");
15676 netif_device_attach(netdev);
15678 tp->timer.expires = jiffies + tp->timer_offset;
15679 add_timer(&tp->timer);
15681 tg3_netif_start(tp);
15689 static struct pci_error_handlers tg3_err_handler = {
15690 .error_detected = tg3_io_error_detected,
15691 .slot_reset = tg3_io_slot_reset,
15692 .resume = tg3_io_resume
15695 static struct pci_driver tg3_driver = {
15696 .name = DRV_MODULE_NAME,
15697 .id_table = tg3_pci_tbl,
15698 .probe = tg3_init_one,
15699 .remove = __devexit_p(tg3_remove_one),
15700 .err_handler = &tg3_err_handler,
15701 .driver.pm = TG3_PM_OPS,
15704 static int __init tg3_init(void)
15706 return pci_register_driver(&tg3_driver);
15709 static void __exit tg3_cleanup(void)
15711 pci_unregister_driver(&tg3_driver);
15714 module_init(tg3_init);
15715 module_exit(tg3_cleanup);