/*
2 * tg3.c: Broadcom Tigon3 ethernet driver.
4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6 * Copyright (C) 2004 Sun Microsystems Inc.
7 * Copyright (C) 2005-2009 Broadcom Corporation.
10 * Derived from proprietary unpublished source code,
11 * Copyright (C) 2000-2003 Broadcom Corporation.
13 * Permission is hereby granted for the distribution of this firmware
14 * data in hexadecimal or equivalent format, provided this copyright
15 * notice is accompanying it.
 */
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/kernel.h>
22 #include <linux/types.h>
23 #include <linux/compiler.h>
24 #include <linux/slab.h>
25 #include <linux/delay.h>
27 #include <linux/init.h>
28 #include <linux/ioport.h>
29 #include <linux/pci.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/mii.h>
35 #include <linux/phy.h>
36 #include <linux/brcmphy.h>
37 #include <linux/if_vlan.h>
39 #include <linux/tcp.h>
40 #include <linux/workqueue.h>
41 #include <linux/prefetch.h>
42 #include <linux/dma-mapping.h>
43 #include <linux/firmware.h>
45 #include <net/checksum.h>
48 #include <asm/system.h>
50 #include <asm/byteorder.h>
51 #include <asm/uaccess.h>
54 #include <asm/idprom.h>
61 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
62 #define TG3_VLAN_TAG_USED 1
64 #define TG3_VLAN_TAG_USED 0
69 #define DRV_MODULE_NAME "tg3"
70 #define PFX DRV_MODULE_NAME ": "
71 #define DRV_MODULE_VERSION "3.105"
72 #define DRV_MODULE_RELDATE "December 2, 2009"
74 #define TG3_DEF_MAC_MODE 0
75 #define TG3_DEF_RX_MODE 0
76 #define TG3_DEF_TX_MODE 0
77 #define TG3_DEF_MSG_ENABLE \
87 /* length of time before we decide the hardware is borked,
88 * and dev->tx_timeout() should be called to fix the problem
 */
90 #define TG3_TX_TIMEOUT (5 * HZ)
92 /* hardware minimum and maximum for a single frame's data payload */
93 #define TG3_MIN_MTU 60
94 #define TG3_MAX_MTU(tp) \
95 ((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) ? 9000 : 1500)
97 /* These numbers seem to be hard coded in the NIC firmware somehow.
98 * You can't change the ring sizes, but you can change where you place
99 * them in the NIC onboard memory.
 */
101 #define TG3_RX_RING_SIZE 512
102 #define TG3_DEF_RX_RING_PENDING 200
103 #define TG3_RX_JUMBO_RING_SIZE 256
104 #define TG3_DEF_RX_JUMBO_RING_PENDING 100
105 #define TG3_RSS_INDIR_TBL_SIZE 128
107 /* Do not place this n-ring entries value into the tp struct itself,
108 * we really want to expose these constants to GCC so that modulo et
109 * al. operations are done with shifts and masks instead of with
110 * hw multiply/modulo instructions. Another solution would be to
111 * replace things like '% foo' with '& (foo - 1)'.
 */
113 #define TG3_RX_RCB_RING_SIZE(tp) \
114 (((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) && \
115 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) ? 1024 : 512)
117 #define TG3_TX_RING_SIZE 512
118 #define TG3_DEF_TX_RING_PENDING (TG3_TX_RING_SIZE - 1)
120 #define TG3_RX_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
122 #define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_ext_rx_buffer_desc) * \
123 TG3_RX_JUMBO_RING_SIZE)
124 #define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
125 TG3_RX_RCB_RING_SIZE(tp))
126 #define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \
128 #define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
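/* Illustrative note (not in the original source): because TG3_TX_RING_SIZE
 * is a power of two (512), the mask form above is exactly the modulo the
 * comment describes, e.g. NEXT_TX(511) == (512 & 511) == 0, so the index
 * wraps back to the start of the ring without a hardware divide.
 */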
130 #define TG3_DMA_BYTE_ENAB 64
132 #define TG3_RX_STD_DMA_SZ 1536
133 #define TG3_RX_JMB_DMA_SZ 9046
135 #define TG3_RX_DMA_TO_MAP_SZ(x) ((x) + TG3_DMA_BYTE_ENAB)
137 #define TG3_RX_STD_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
138 #define TG3_RX_JMB_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
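/* Worked example (illustrative): a standard RX buffer of TG3_RX_STD_DMA_SZ
 * (1536) bytes is DMA-mapped as 1536 + TG3_DMA_BYTE_ENAB (64) = 1600 bytes,
 * and a jumbo buffer maps as 9046 + 64 = 9110 bytes.
 */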
140 #define TG3_RX_STD_BUFF_RING_SIZE \
141 (sizeof(struct ring_info) * TG3_RX_RING_SIZE)
143 #define TG3_RX_JMB_BUFF_RING_SIZE \
144 (sizeof(struct ring_info) * TG3_RX_JUMBO_RING_SIZE)
146 /* minimum number of free TX descriptors required to wake up TX process */
147 #define TG3_TX_WAKEUP_THRESH(tnapi) ((tnapi)->tx_pending / 4)
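/* Illustrative: with the default TG3_DEF_TX_RING_PENDING (511) descriptors
 * pending, the TX queue is woken once 511 / 4 = 127 descriptors are free.
 */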
149 #define TG3_RAW_IP_ALIGN 2
151 /* number of ETHTOOL_GSTATS u64's */
152 #define TG3_NUM_STATS (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
154 #define TG3_NUM_TEST 6
156 #define FIRMWARE_TG3 "tigon/tg3.bin"
157 #define FIRMWARE_TG3TSO "tigon/tg3_tso.bin"
158 #define FIRMWARE_TG3TSO5 "tigon/tg3_tso5.bin"
160 static char version[] __devinitdata =
161 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
163 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
164 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
165 MODULE_LICENSE("GPL");
166 MODULE_VERSION(DRV_MODULE_VERSION);
167 MODULE_FIRMWARE(FIRMWARE_TG3);
168 MODULE_FIRMWARE(FIRMWARE_TG3TSO);
169 MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
171 #define TG3_RSS_MIN_NUM_MSIX_VECS 2
173 static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
174 module_param(tg3_debug, int, 0);
175 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
177 static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
178 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
179 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
180 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
181 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
182 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
183 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
184 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
185 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
186 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
187 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
188 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
189 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
190 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
191 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
192 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
193 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
194 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
195 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
196 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
197 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
198 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
199 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
200 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)},
201 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
202 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
203 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
204 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
205 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)},
206 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
207 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
208 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
209 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
210 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
211 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
212 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
213 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
214 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
215 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
216 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
217 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
218 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
219 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
220 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
221 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
222 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
223 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
224 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
225 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
226 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
227 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
228 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
229 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
230 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
231 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
232 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
233 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
234 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
235 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
236 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
237 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
238 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
239 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
240 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
241 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
242 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
243 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
244 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
245 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
246 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5724)},
247 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
248 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
249 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
250 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
251 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
252 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
253 {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
257 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
259 static const struct {
260 const char string[ETH_GSTRING_LEN];
261 } ethtool_stats_keys[TG3_NUM_STATS] = {
264 { "rx_ucast_packets" },
265 { "rx_mcast_packets" },
266 { "rx_bcast_packets" },
268 { "rx_align_errors" },
269 { "rx_xon_pause_rcvd" },
270 { "rx_xoff_pause_rcvd" },
271 { "rx_mac_ctrl_rcvd" },
272 { "rx_xoff_entered" },
273 { "rx_frame_too_long_errors" },
275 { "rx_undersize_packets" },
276 { "rx_in_length_errors" },
277 { "rx_out_length_errors" },
278 { "rx_64_or_less_octet_packets" },
279 { "rx_65_to_127_octet_packets" },
280 { "rx_128_to_255_octet_packets" },
281 { "rx_256_to_511_octet_packets" },
282 { "rx_512_to_1023_octet_packets" },
283 { "rx_1024_to_1522_octet_packets" },
284 { "rx_1523_to_2047_octet_packets" },
285 { "rx_2048_to_4095_octet_packets" },
286 { "rx_4096_to_8191_octet_packets" },
287 { "rx_8192_to_9022_octet_packets" },
294 { "tx_flow_control" },
296 { "tx_single_collisions" },
297 { "tx_mult_collisions" },
299 { "tx_excessive_collisions" },
300 { "tx_late_collisions" },
301 { "tx_collide_2times" },
302 { "tx_collide_3times" },
303 { "tx_collide_4times" },
304 { "tx_collide_5times" },
305 { "tx_collide_6times" },
306 { "tx_collide_7times" },
307 { "tx_collide_8times" },
308 { "tx_collide_9times" },
309 { "tx_collide_10times" },
310 { "tx_collide_11times" },
311 { "tx_collide_12times" },
312 { "tx_collide_13times" },
313 { "tx_collide_14times" },
314 { "tx_collide_15times" },
315 { "tx_ucast_packets" },
316 { "tx_mcast_packets" },
317 { "tx_bcast_packets" },
318 { "tx_carrier_sense_errors" },
322 { "dma_writeq_full" },
323 { "dma_write_prioq_full" },
327 { "rx_threshold_hit" },
329 { "dma_readq_full" },
330 { "dma_read_prioq_full" },
331 { "tx_comp_queue_full" },
333 { "ring_set_send_prod_index" },
334 { "ring_status_update" },
336 { "nic_avoided_irqs" },
337 { "nic_tx_threshold_hit" }
340 static const struct {
341 const char string[ETH_GSTRING_LEN];
342 } ethtool_test_keys[TG3_NUM_TEST] = {
343 { "nvram test (online) " },
344 { "link test (online) " },
345 { "register test (offline)" },
346 { "memory test (offline)" },
347 { "loopback test (offline)" },
348 { "interrupt test (offline)" },
351 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
353 writel(val, tp->regs + off);
356 static u32 tg3_read32(struct tg3 *tp, u32 off)
358 return (readl(tp->regs + off));
361 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
363 writel(val, tp->aperegs + off);
366 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
368 return (readl(tp->aperegs + off));
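/* Descriptive comment added for clarity: the helpers below implement
 * indirect register access for configurations where the memory-mapped
 * window cannot be used.  The target register offset is written to the
 * TG3PCI_REG_BASE_ADDR PCI config word and the data is then transferred
 * through TG3PCI_REG_DATA, with indirect_lock serializing the pair of
 * config-space accesses.
 */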
371 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
375 spin_lock_irqsave(&tp->indirect_lock, flags);
376 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
377 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
378 spin_unlock_irqrestore(&tp->indirect_lock, flags);
381 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
383 writel(val, tp->regs + off);
384 readl(tp->regs + off);
387 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
392 spin_lock_irqsave(&tp->indirect_lock, flags);
393 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
394 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
395 spin_unlock_irqrestore(&tp->indirect_lock, flags);
399 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
403 if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
404 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
405 TG3_64BIT_REG_LOW, val);
408 if (off == TG3_RX_STD_PROD_IDX_REG) {
409 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
410 TG3_64BIT_REG_LOW, val);
414 spin_lock_irqsave(&tp->indirect_lock, flags);
415 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
416 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
417 spin_unlock_irqrestore(&tp->indirect_lock, flags);
419 /* In indirect mode when disabling interrupts, we also need
420 * to clear the interrupt bit in the GRC local ctrl register.
 */
422 if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
424 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
425 tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
429 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
434 spin_lock_irqsave(&tp->indirect_lock, flags);
435 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
436 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
437 spin_unlock_irqrestore(&tp->indirect_lock, flags);
441 /* usec_wait specifies the wait time in usec when writing to certain registers
442 * where it is unsafe to read back the register without some delay.
443 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
444 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
446 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
448 if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
449 (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
450 /* Non-posted methods */
451 tp->write32(tp, off, val);
454 tg3_write32(tp, off, val);
459 /* Wait again after the read for the posted method to guarantee that
460 * the wait time is met.
 */
466 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
468 tp->write32_mbox(tp, off, val);
469 if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
470 !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
471 tp->read32_mbox(tp, off);
474 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
476 void __iomem *mbox = tp->regs + off;
478 if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
480 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
484 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
486 return (readl(tp->regs + off + GRCMBOX_BASE));
489 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
491 writel(val, tp->regs + off + GRCMBOX_BASE);
494 #define tw32_mailbox(reg, val) tp->write32_mbox(tp, reg, val)
495 #define tw32_mailbox_f(reg, val) tw32_mailbox_flush(tp, (reg), (val))
496 #define tw32_rx_mbox(reg, val) tp->write32_rx_mbox(tp, reg, val)
497 #define tw32_tx_mbox(reg, val) tp->write32_tx_mbox(tp, reg, val)
498 #define tr32_mailbox(reg) tp->read32_mbox(tp, reg)
500 #define tw32(reg,val) tp->write32(tp, reg, val)
501 #define tw32_f(reg,val) _tw32_flush(tp,(reg),(val), 0)
502 #define tw32_wait_f(reg,val,us) _tw32_flush(tp,(reg),(val), (us))
503 #define tr32(reg) tp->read32(tp, reg)
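/* Usage note (added for clarity): tw32_f() is _tw32_flush() with no extra
 * delay, while tw32_wait_f() supplies the usec_wait described above
 * _tw32_flush(); for example, tg3_switch_clocks() below uses
 * tw32_wait_f(TG3PCI_CLOCK_CTRL, ..., 40) so that 40 usec elapse before the
 * clock register is touched again.
 */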
505 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
509 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
510 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
513 spin_lock_irqsave(&tp->indirect_lock, flags);
514 if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
515 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
516 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
518 /* Always leave this as zero. */
519 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
521 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
522 tw32_f(TG3PCI_MEM_WIN_DATA, val);
524 /* Always leave this as zero. */
525 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
527 spin_unlock_irqrestore(&tp->indirect_lock, flags);
530 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
534 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
535 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
540 spin_lock_irqsave(&tp->indirect_lock, flags);
541 if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
542 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
543 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
545 /* Always leave this as zero. */
546 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
548 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
549 *val = tr32(TG3PCI_MEM_WIN_DATA);
551 /* Always leave this as zero. */
552 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
554 spin_unlock_irqrestore(&tp->indirect_lock, flags);
557 static void tg3_ape_lock_init(struct tg3 *tp)
561 /* Make sure the driver isn't holding any stale locks. */
562 for (i = 0; i < 8; i++)
563 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + 4 * i,
564 APE_LOCK_GRANT_DRIVER);
567 static int tg3_ape_lock(struct tg3 *tp, int locknum)
573 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
577 case TG3_APE_LOCK_GRC:
578 case TG3_APE_LOCK_MEM:
586 tg3_ape_write32(tp, TG3_APE_LOCK_REQ + off, APE_LOCK_REQ_DRIVER);
588 /* Wait for up to 1 millisecond to acquire lock. */
589 for (i = 0; i < 100; i++) {
590 status = tg3_ape_read32(tp, TG3_APE_LOCK_GRANT + off);
591 if (status == APE_LOCK_GRANT_DRIVER)
596 if (status != APE_LOCK_GRANT_DRIVER) {
597 /* Revoke the lock request. */
598 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off,
599 APE_LOCK_GRANT_DRIVER);
607 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
611 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
615 case TG3_APE_LOCK_GRC:
616 case TG3_APE_LOCK_MEM:
623 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off, APE_LOCK_GRANT_DRIVER);
626 static void tg3_disable_ints(struct tg3 *tp)
630 tw32(TG3PCI_MISC_HOST_CTRL,
631 (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
632 for (i = 0; i < tp->irq_max; i++)
633 tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
636 static void tg3_enable_ints(struct tg3 *tp)
644 tw32(TG3PCI_MISC_HOST_CTRL,
645 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
647 for (i = 0; i < tp->irq_cnt; i++) {
648 struct tg3_napi *tnapi = &tp->napi[i];
649 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
650 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
651 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
653 coal_now |= tnapi->coal_now;
656 /* Force an initial interrupt */
657 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
658 (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
659 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
661 tw32(HOSTCC_MODE, tp->coalesce_mode |
662 HOSTCC_MODE_ENABLE | coal_now);
665 static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
667 struct tg3 *tp = tnapi->tp;
668 struct tg3_hw_status *sblk = tnapi->hw_status;
669 unsigned int work_exists = 0;
671 /* check for phy events */
672 if (!(tp->tg3_flags &
673 (TG3_FLAG_USE_LINKCHG_REG |
674 TG3_FLAG_POLL_SERDES))) {
675 if (sblk->status & SD_STATUS_LINK_CHG)
678 /* check for RX/TX work to do */
679 if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
680 *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
/*
687 * similar to tg3_enable_ints, but it accurately determines whether there
688 * is new work pending and can return without flushing the PIO write
689 * which reenables interrupts
 */
691 static void tg3_int_reenable(struct tg3_napi *tnapi)
693 struct tg3 *tp = tnapi->tp;
695 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
698 /* When doing tagged status, this work check is unnecessary.
699 * The last_tag we write above tells the chip which piece of
700 * work we've completed.
 */
702 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
704 tw32(HOSTCC_MODE, tp->coalesce_mode |
705 HOSTCC_MODE_ENABLE | tnapi->coal_now);
708 static void tg3_napi_disable(struct tg3 *tp)
712 for (i = tp->irq_cnt - 1; i >= 0; i--)
713 napi_disable(&tp->napi[i].napi);
716 static void tg3_napi_enable(struct tg3 *tp)
720 for (i = 0; i < tp->irq_cnt; i++)
721 napi_enable(&tp->napi[i].napi);
724 static inline void tg3_netif_stop(struct tg3 *tp)
726 tp->dev->trans_start = jiffies; /* prevent tx timeout */
727 tg3_napi_disable(tp);
728 netif_tx_disable(tp->dev);
731 static inline void tg3_netif_start(struct tg3 *tp)
733 /* NOTE: unconditional netif_tx_wake_all_queues is only
734 * appropriate so long as all callers are assured to
735 * have free tx slots (such as after tg3_init_hw)
 */
737 netif_tx_wake_all_queues(tp->dev);
740 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
744 static void tg3_switch_clocks(struct tg3 *tp)
749 if ((tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
750 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
753 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
755 orig_clock_ctrl = clock_ctrl;
756 clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
757 CLOCK_CTRL_CLKRUN_OENABLE |
759 tp->pci_clock_ctrl = clock_ctrl;
761 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
762 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
763 tw32_wait_f(TG3PCI_CLOCK_CTRL,
764 clock_ctrl | CLOCK_CTRL_625_CORE, 40);
766 } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
767 tw32_wait_f(TG3PCI_CLOCK_CTRL,
769 (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
771 tw32_wait_f(TG3PCI_CLOCK_CTRL,
772 clock_ctrl | (CLOCK_CTRL_ALTCLK),
775 tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
778 #define PHY_BUSY_LOOPS 5000
780 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
786 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
788 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
794 frame_val = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
795 MI_COM_PHY_ADDR_MASK);
796 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
797 MI_COM_REG_ADDR_MASK);
798 frame_val |= (MI_COM_CMD_READ | MI_COM_START);
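/* Comment added for clarity: at this point frame_val encodes one MDIO read
 * transaction -- the PHY address and register number in their MI_COM fields
 * plus the read command and start bit; once the transaction completes, the
 * result is taken from the low MI_COM_DATA_MASK bits read back below.
 */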
800 tw32_f(MAC_MI_COM, frame_val);
802 loops = PHY_BUSY_LOOPS;
805 frame_val = tr32(MAC_MI_COM);
807 if ((frame_val & MI_COM_BUSY) == 0) {
809 frame_val = tr32(MAC_MI_COM);
817 *val = frame_val & MI_COM_DATA_MASK;
821 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
822 tw32_f(MAC_MI_MODE, tp->mi_mode);
829 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
835 if ((tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) &&
836 (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
839 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
841 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
845 frame_val = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
846 MI_COM_PHY_ADDR_MASK);
847 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
848 MI_COM_REG_ADDR_MASK);
849 frame_val |= (val & MI_COM_DATA_MASK);
850 frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
852 tw32_f(MAC_MI_COM, frame_val);
854 loops = PHY_BUSY_LOOPS;
857 frame_val = tr32(MAC_MI_COM);
858 if ((frame_val & MI_COM_BUSY) == 0) {
860 frame_val = tr32(MAC_MI_COM);
870 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
871 tw32_f(MAC_MI_MODE, tp->mi_mode);
878 static int tg3_bmcr_reset(struct tg3 *tp)
883 /* OK, reset it, and poll the BMCR_RESET bit until it
884 * clears or we time out.
 */
886 phy_control = BMCR_RESET;
887 err = tg3_writephy(tp, MII_BMCR, phy_control);
893 err = tg3_readphy(tp, MII_BMCR, &phy_control);
897 if ((phy_control & BMCR_RESET) == 0) {
909 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
911 struct tg3 *tp = bp->priv;
914 spin_lock_bh(&tp->lock);
916 if (tg3_readphy(tp, reg, &val))
919 spin_unlock_bh(&tp->lock);
924 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
926 struct tg3 *tp = bp->priv;
929 spin_lock_bh(&tp->lock);
931 if (tg3_writephy(tp, reg, val))
934 spin_unlock_bh(&tp->lock);
939 static int tg3_mdio_reset(struct mii_bus *bp)
944 static void tg3_mdio_config_5785(struct tg3 *tp)
947 struct phy_device *phydev;
949 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
950 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
951 case TG3_PHY_ID_BCM50610:
952 case TG3_PHY_ID_BCM50610M:
953 val = MAC_PHYCFG2_50610_LED_MODES;
955 case TG3_PHY_ID_BCMAC131:
956 val = MAC_PHYCFG2_AC131_LED_MODES;
958 case TG3_PHY_ID_RTL8211C:
959 val = MAC_PHYCFG2_RTL8211C_LED_MODES;
961 case TG3_PHY_ID_RTL8201E:
962 val = MAC_PHYCFG2_RTL8201E_LED_MODES;
968 if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
969 tw32(MAC_PHYCFG2, val);
971 val = tr32(MAC_PHYCFG1);
972 val &= ~(MAC_PHYCFG1_RGMII_INT |
973 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
974 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
975 tw32(MAC_PHYCFG1, val);
980 if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE))
981 val |= MAC_PHYCFG2_EMODE_MASK_MASK |
982 MAC_PHYCFG2_FMODE_MASK_MASK |
983 MAC_PHYCFG2_GMODE_MASK_MASK |
984 MAC_PHYCFG2_ACT_MASK_MASK |
985 MAC_PHYCFG2_QUAL_MASK_MASK |
986 MAC_PHYCFG2_INBAND_ENABLE;
988 tw32(MAC_PHYCFG2, val);
990 val = tr32(MAC_PHYCFG1);
991 val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
992 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
993 if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)) {
994 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
995 val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
996 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
997 val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
999 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
1000 MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
1001 tw32(MAC_PHYCFG1, val);
1003 val = tr32(MAC_EXT_RGMII_MODE);
1004 val &= ~(MAC_RGMII_MODE_RX_INT_B |
1005 MAC_RGMII_MODE_RX_QUALITY |
1006 MAC_RGMII_MODE_RX_ACTIVITY |
1007 MAC_RGMII_MODE_RX_ENG_DET |
1008 MAC_RGMII_MODE_TX_ENABLE |
1009 MAC_RGMII_MODE_TX_LOWPWR |
1010 MAC_RGMII_MODE_TX_RESET);
1011 if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)) {
1012 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
1013 val |= MAC_RGMII_MODE_RX_INT_B |
1014 MAC_RGMII_MODE_RX_QUALITY |
1015 MAC_RGMII_MODE_RX_ACTIVITY |
1016 MAC_RGMII_MODE_RX_ENG_DET;
1017 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
1018 val |= MAC_RGMII_MODE_TX_ENABLE |
1019 MAC_RGMII_MODE_TX_LOWPWR |
1020 MAC_RGMII_MODE_TX_RESET;
1022 tw32(MAC_EXT_RGMII_MODE, val);
1025 static void tg3_mdio_start(struct tg3 *tp)
1027 tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
1028 tw32_f(MAC_MI_MODE, tp->mi_mode);
1031 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
1032 u32 funcnum, is_serdes;
1034 funcnum = tr32(TG3_CPMU_STATUS) & TG3_CPMU_STATUS_PCIE_FUNC;
1040 is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
1044 tp->phy_addr = TG3_PHY_MII_ADDR;
1046 if ((tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) &&
1047 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
1048 tg3_mdio_config_5785(tp);
1051 static int tg3_mdio_init(struct tg3 *tp)
1055 struct phy_device *phydev;
1059 if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) ||
1060 (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED))
1063 tp->mdio_bus = mdiobus_alloc();
1064 if (tp->mdio_bus == NULL)
1067 tp->mdio_bus->name = "tg3 mdio bus";
1068 snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
1069 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
1070 tp->mdio_bus->priv = tp;
1071 tp->mdio_bus->parent = &tp->pdev->dev;
1072 tp->mdio_bus->read = &tg3_mdio_read;
1073 tp->mdio_bus->write = &tg3_mdio_write;
1074 tp->mdio_bus->reset = &tg3_mdio_reset;
1075 tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
1076 tp->mdio_bus->irq = &tp->mdio_irq[0];
1078 for (i = 0; i < PHY_MAX_ADDR; i++)
1079 tp->mdio_bus->irq[i] = PHY_POLL;
1081 /* The bus registration will look for all the PHYs on the mdio bus.
1082 * Unfortunately, it does not ensure the PHY is powered up before
1083 * accessing the PHY ID registers. A chip reset is the
1084 * quickest way to bring the device back to an operational state.
 */
1086 if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
1089 i = mdiobus_register(tp->mdio_bus);
1091 printk(KERN_WARNING "%s: mdiobus_reg failed (0x%x)\n",
1093 mdiobus_free(tp->mdio_bus);
1097 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1099 if (!phydev || !phydev->drv) {
1100 printk(KERN_WARNING "%s: No PHY devices\n", tp->dev->name);
1101 mdiobus_unregister(tp->mdio_bus);
1102 mdiobus_free(tp->mdio_bus);
1106 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1107 case TG3_PHY_ID_BCM57780:
1108 phydev->interface = PHY_INTERFACE_MODE_GMII;
1109 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1111 case TG3_PHY_ID_BCM50610:
1112 case TG3_PHY_ID_BCM50610M:
1113 phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
1114 PHY_BRCM_RX_REFCLK_UNUSED |
1115 PHY_BRCM_DIS_TXCRXC_NOENRGY |
1116 PHY_BRCM_AUTO_PWRDWN_ENABLE;
1117 if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)
1118 phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
1119 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
1120 phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
1121 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
1122 phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
1124 case TG3_PHY_ID_RTL8211C:
1125 phydev->interface = PHY_INTERFACE_MODE_RGMII;
1127 case TG3_PHY_ID_RTL8201E:
1128 case TG3_PHY_ID_BCMAC131:
1129 phydev->interface = PHY_INTERFACE_MODE_MII;
1130 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1131 tp->tg3_flags3 |= TG3_FLG3_PHY_IS_FET;
1135 tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_INITED;
1137 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
1138 tg3_mdio_config_5785(tp);
1143 static void tg3_mdio_fini(struct tg3 *tp)
1145 if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
1146 tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_INITED;
1147 mdiobus_unregister(tp->mdio_bus);
1148 mdiobus_free(tp->mdio_bus);
1152 /* tp->lock is held. */
1153 static inline void tg3_generate_fw_event(struct tg3 *tp)
1157 val = tr32(GRC_RX_CPU_EVENT);
1158 val |= GRC_RX_CPU_DRIVER_EVENT;
1159 tw32_f(GRC_RX_CPU_EVENT, val);
1161 tp->last_event_jiffies = jiffies;
1164 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1166 /* tp->lock is held. */
1167 static void tg3_wait_for_event_ack(struct tg3 *tp)
1170 unsigned int delay_cnt;
1173 /* If enough time has passed, no wait is necessary. */
1174 time_remain = (long)(tp->last_event_jiffies + 1 +
1175 usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1177 if (time_remain < 0)
1180 /* Check if we can shorten the wait time. */
1181 delay_cnt = jiffies_to_usecs(time_remain);
1182 if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1183 delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
1184 delay_cnt = (delay_cnt >> 3) + 1;
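/* Illustrative arithmetic: the shift splits the remaining wait into polling
 * steps of roughly 8 usec each, so a full TG3_FW_EVENT_TIMEOUT_USEC (2500)
 * budget yields at most (2500 >> 3) + 1 = 313 loop iterations.
 */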
1186 for (i = 0; i < delay_cnt; i++) {
1187 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1193 /* tp->lock is held. */
1194 static void tg3_ump_link_report(struct tg3 *tp)
1199 if (!(tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
1200 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
1203 tg3_wait_for_event_ack(tp);
1205 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1207 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1210 if (!tg3_readphy(tp, MII_BMCR, &reg))
1212 if (!tg3_readphy(tp, MII_BMSR, &reg))
1213 val |= (reg & 0xffff);
1214 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);
1217 if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1219 if (!tg3_readphy(tp, MII_LPA, &reg))
1220 val |= (reg & 0xffff);
1221 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);
1224 if (!(tp->tg3_flags2 & TG3_FLG2_MII_SERDES)) {
1225 if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1227 if (!tg3_readphy(tp, MII_STAT1000, &reg))
1228 val |= (reg & 0xffff);
1230 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);
1232 if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1236 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);
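/* Summary (added for clarity): the 14-byte FWCMD_NICDRV_LINK_UPDATE payload
 * built above packs pairs of MII registers per 32-bit mailbox word --
 * BMCR/BMSR at offset 0, ADVERTISE/LPA at +4, the 1000BASE-T control/status
 * pair at +8 (copper only), and the PHY address word at +12.
 */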
1238 tg3_generate_fw_event(tp);
1241 static void tg3_link_report(struct tg3 *tp)
1243 if (!netif_carrier_ok(tp->dev)) {
1244 if (netif_msg_link(tp))
1245 printk(KERN_INFO PFX "%s: Link is down.\n",
1247 tg3_ump_link_report(tp);
1248 } else if (netif_msg_link(tp)) {
1249 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1251 (tp->link_config.active_speed == SPEED_1000 ?
1253 (tp->link_config.active_speed == SPEED_100 ?
1255 (tp->link_config.active_duplex == DUPLEX_FULL ?
1258 printk(KERN_INFO PFX
1259 "%s: Flow control is %s for TX and %s for RX.\n",
1261 (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1263 (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1265 tg3_ump_link_report(tp);
1269 static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
1273 if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1274 miireg = ADVERTISE_PAUSE_CAP;
1275 else if (flow_ctrl & FLOW_CTRL_TX)
1276 miireg = ADVERTISE_PAUSE_ASYM;
1277 else if (flow_ctrl & FLOW_CTRL_RX)
1278 miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
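/* Note (added for clarity): this mapping follows the usual 802.3 pause
 * resolution rules -- symmetric pause advertises PAUSE_CAP, TX-only
 * advertises only the asymmetric bit, and an RX-only station advertises
 * both bits so that either a symmetric or an asymmetric-pause partner will
 * send it pause frames.  The 1000BASE-X variant below does the same with
 * the 1000X advertisement bits.
 */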
1285 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1289 if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1290 miireg = ADVERTISE_1000XPAUSE;
1291 else if (flow_ctrl & FLOW_CTRL_TX)
1292 miireg = ADVERTISE_1000XPSE_ASYM;
1293 else if (flow_ctrl & FLOW_CTRL_RX)
1294 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1301 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1305 if (lcladv & ADVERTISE_1000XPAUSE) {
1306 if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1307 if (rmtadv & LPA_1000XPAUSE)
1308 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1309 else if (rmtadv & LPA_1000XPAUSE_ASYM)
1312 if (rmtadv & LPA_1000XPAUSE)
1313 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1315 } else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1316 if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
1323 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1327 u32 old_rx_mode = tp->rx_mode;
1328 u32 old_tx_mode = tp->tx_mode;
1330 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
1331 autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
1333 autoneg = tp->link_config.autoneg;
1335 if (autoneg == AUTONEG_ENABLE &&
1336 (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)) {
1337 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
1338 flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1340 flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1342 flowctrl = tp->link_config.flowctrl;
1344 tp->link_config.active_flowctrl = flowctrl;
1346 if (flowctrl & FLOW_CTRL_RX)
1347 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1349 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1351 if (old_rx_mode != tp->rx_mode)
1352 tw32_f(MAC_RX_MODE, tp->rx_mode);
1354 if (flowctrl & FLOW_CTRL_TX)
1355 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1357 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1359 if (old_tx_mode != tp->tx_mode)
1360 tw32_f(MAC_TX_MODE, tp->tx_mode);
1363 static void tg3_adjust_link(struct net_device *dev)
1365 u8 oldflowctrl, linkmesg = 0;
1366 u32 mac_mode, lcl_adv, rmt_adv;
1367 struct tg3 *tp = netdev_priv(dev);
1368 struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1370 spin_lock_bh(&tp->lock);
1372 mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1373 MAC_MODE_HALF_DUPLEX);
1375 oldflowctrl = tp->link_config.active_flowctrl;
1381 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
1382 mac_mode |= MAC_MODE_PORT_MODE_MII;
1383 else if (phydev->speed == SPEED_1000 ||
1384 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
1385 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1387 mac_mode |= MAC_MODE_PORT_MODE_MII;
1389 if (phydev->duplex == DUPLEX_HALF)
1390 mac_mode |= MAC_MODE_HALF_DUPLEX;
1392 lcl_adv = tg3_advert_flowctrl_1000T(
1393 tp->link_config.flowctrl);
1396 rmt_adv = LPA_PAUSE_CAP;
1397 if (phydev->asym_pause)
1398 rmt_adv |= LPA_PAUSE_ASYM;
1401 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
1403 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1405 if (mac_mode != tp->mac_mode) {
1406 tp->mac_mode = mac_mode;
1407 tw32_f(MAC_MODE, tp->mac_mode);
1411 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
1412 if (phydev->speed == SPEED_10)
1414 MAC_MI_STAT_10MBPS_MODE |
1415 MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1417 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1420 if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
1421 tw32(MAC_TX_LENGTHS,
1422 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1423 (6 << TX_LENGTHS_IPG_SHIFT) |
1424 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
1426 tw32(MAC_TX_LENGTHS,
1427 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1428 (6 << TX_LENGTHS_IPG_SHIFT) |
1429 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
1431 if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
1432 (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
1433 phydev->speed != tp->link_config.active_speed ||
1434 phydev->duplex != tp->link_config.active_duplex ||
1435 oldflowctrl != tp->link_config.active_flowctrl)
1438 tp->link_config.active_speed = phydev->speed;
1439 tp->link_config.active_duplex = phydev->duplex;
1441 spin_unlock_bh(&tp->lock);
1444 tg3_link_report(tp);
1447 static int tg3_phy_init(struct tg3 *tp)
1449 struct phy_device *phydev;
1451 if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)
1454 /* Bring the PHY back to a known state. */
1457 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1459 /* Attach the MAC to the PHY. */
1460 phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
1461 phydev->dev_flags, phydev->interface);
1462 if (IS_ERR(phydev)) {
1463 printk(KERN_ERR "%s: Could not attach to PHY\n", tp->dev->name);
1464 return PTR_ERR(phydev);
1467 /* Mask with MAC supported features. */
1468 switch (phydev->interface) {
1469 case PHY_INTERFACE_MODE_GMII:
1470 case PHY_INTERFACE_MODE_RGMII:
1471 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1472 phydev->supported &= (PHY_GBIT_FEATURES |
1474 SUPPORTED_Asym_Pause);
1478 case PHY_INTERFACE_MODE_MII:
1479 phydev->supported &= (PHY_BASIC_FEATURES |
1481 SUPPORTED_Asym_Pause);
1484 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1488 tp->tg3_flags3 |= TG3_FLG3_PHY_CONNECTED;
1490 phydev->advertising = phydev->supported;
1495 static void tg3_phy_start(struct tg3 *tp)
1497 struct phy_device *phydev;
1499 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
1502 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1504 if (tp->link_config.phy_is_low_power) {
1505 tp->link_config.phy_is_low_power = 0;
1506 phydev->speed = tp->link_config.orig_speed;
1507 phydev->duplex = tp->link_config.orig_duplex;
1508 phydev->autoneg = tp->link_config.orig_autoneg;
1509 phydev->advertising = tp->link_config.orig_advertising;
1514 phy_start_aneg(phydev);
1517 static void tg3_phy_stop(struct tg3 *tp)
1519 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
1522 phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1525 static void tg3_phy_fini(struct tg3 *tp)
1527 if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
1528 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1529 tp->tg3_flags3 &= ~TG3_FLG3_PHY_CONNECTED;
1533 static void tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1535 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1536 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1539 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
1543 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
1546 tg3_writephy(tp, MII_TG3_FET_TEST,
1547 phytest | MII_TG3_FET_SHADOW_EN);
1548 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
1550 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
1552 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
1553 tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
1555 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
1559 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
1563 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
1564 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
1565 (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
1568 if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
1569 tg3_phy_fet_toggle_apd(tp, enable);
1573 reg = MII_TG3_MISC_SHDW_WREN |
1574 MII_TG3_MISC_SHDW_SCR5_SEL |
1575 MII_TG3_MISC_SHDW_SCR5_LPED |
1576 MII_TG3_MISC_SHDW_SCR5_DLPTLM |
1577 MII_TG3_MISC_SHDW_SCR5_SDTL |
1578 MII_TG3_MISC_SHDW_SCR5_C125OE;
1579 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
1580 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
1582 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
1585 reg = MII_TG3_MISC_SHDW_WREN |
1586 MII_TG3_MISC_SHDW_APD_SEL |
1587 MII_TG3_MISC_SHDW_APD_WKTM_84MS;
1589 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
1591 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
1594 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
1598 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
1599 (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
1602 if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
1605 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
1606 u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
1608 tg3_writephy(tp, MII_TG3_FET_TEST,
1609 ephy | MII_TG3_FET_SHADOW_EN);
1610 if (!tg3_readphy(tp, reg, &phy)) {
1612 phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
1614 phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
1615 tg3_writephy(tp, reg, phy);
1617 tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
1620 phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC |
1621 MII_TG3_AUXCTL_SHDWSEL_MISC;
1622 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) &&
1623 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) {
1625 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
1627 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
1628 phy |= MII_TG3_AUXCTL_MISC_WREN;
1629 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
1634 static void tg3_phy_set_wirespeed(struct tg3 *tp)
1638 if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
1641 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
1642 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
1643 tg3_writephy(tp, MII_TG3_AUX_CTRL,
1644 (val | (1 << 15) | (1 << 4)));
1647 static void tg3_phy_apply_otp(struct tg3 *tp)
1656 /* Enable SM_DSP clock and tx 6dB coding. */
1657 phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
1658 MII_TG3_AUXCTL_ACTL_SMDSP_ENA |
1659 MII_TG3_AUXCTL_ACTL_TX_6DB;
1660 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
1662 phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
1663 phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
1664 tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
1666 phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
1667 ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
1668 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
1670 phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
1671 phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
1672 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
1674 phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
1675 tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
1677 phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
1678 tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
1680 phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
1681 ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
1682 tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
1684 /* Turn off SM_DSP clock. */
1685 phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
1686 MII_TG3_AUXCTL_ACTL_TX_6DB;
1687 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
1690 static int tg3_wait_macro_done(struct tg3 *tp)
1697 if (!tg3_readphy(tp, 0x16, &tmp32)) {
1698 if ((tmp32 & 0x1000) == 0)
1708 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
1710 static const u32 test_pat[4][6] = {
1711 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
1712 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
1713 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
1714 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
1718 for (chan = 0; chan < 4; chan++) {
1721 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1722 (chan * 0x2000) | 0x0200);
1723 tg3_writephy(tp, 0x16, 0x0002);
1725 for (i = 0; i < 6; i++)
1726 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
1729 tg3_writephy(tp, 0x16, 0x0202);
1730 if (tg3_wait_macro_done(tp)) {
1735 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1736 (chan * 0x2000) | 0x0200);
1737 tg3_writephy(tp, 0x16, 0x0082);
1738 if (tg3_wait_macro_done(tp)) {
1743 tg3_writephy(tp, 0x16, 0x0802);
1744 if (tg3_wait_macro_done(tp)) {
1749 for (i = 0; i < 6; i += 2) {
1752 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
1753 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
1754 tg3_wait_macro_done(tp)) {
1760 if (low != test_pat[chan][i] ||
1761 high != test_pat[chan][i+1]) {
1762 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
1763 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
1764 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
1774 static int tg3_phy_reset_chanpat(struct tg3 *tp)
1778 for (chan = 0; chan < 4; chan++) {
1781 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1782 (chan * 0x2000) | 0x0200);
1783 tg3_writephy(tp, 0x16, 0x0002);
1784 for (i = 0; i < 6; i++)
1785 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
1786 tg3_writephy(tp, 0x16, 0x0202);
1787 if (tg3_wait_macro_done(tp))
1794 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
1796 u32 reg32, phy9_orig;
1797 int retries, do_phy_reset, err;
1803 err = tg3_bmcr_reset(tp);
1809 /* Disable transmitter and interrupt. */
1810 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
1814 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
1816 /* Set full-duplex, 1000 mbps. */
1817 tg3_writephy(tp, MII_BMCR,
1818 BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
1820 /* Set to master mode. */
1821 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
1824 tg3_writephy(tp, MII_TG3_CTRL,
1825 (MII_TG3_CTRL_AS_MASTER |
1826 MII_TG3_CTRL_ENABLE_AS_MASTER));
1828 /* Enable SM_DSP_CLOCK and 6dB. */
1829 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1831 /* Block the PHY control access. */
1832 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
1833 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
1835 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
1838 } while (--retries);
1840 err = tg3_phy_reset_chanpat(tp);
1844 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
1845 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
1847 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
1848 tg3_writephy(tp, 0x16, 0x0000);
1850 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1851 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
1852 /* Set Extended packet length bit for jumbo frames */
1853 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
1856 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1859 tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
1861 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
1863 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
1870 /* This will reset the tigon3 PHY if there is no valid
1871 * link unless the FORCE argument is non-zero.
 */
1873 static int tg3_phy_reset(struct tg3 *tp)
1879 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1882 val = tr32(GRC_MISC_CFG);
1883 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
1886 err = tg3_readphy(tp, MII_BMSR, &phy_status);
1887 err |= tg3_readphy(tp, MII_BMSR, &phy_status);
1891 if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
1892 netif_carrier_off(tp->dev);
1893 tg3_link_report(tp);
1896 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1897 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1898 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
1899 err = tg3_phy_reset_5703_4_5(tp);
1906 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
1907 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
1908 cpmuctrl = tr32(TG3_CPMU_CTRL);
1909 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
1911 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
1914 err = tg3_bmcr_reset(tp);
1918 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
1921 phy = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
1922 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, phy);
1924 tw32(TG3_CPMU_CTRL, cpmuctrl);
1927 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
1928 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
1931 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
1932 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
1933 CPMU_LSPD_1000MB_MACCLK_12_5) {
1934 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
1936 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
1940 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
1941 (tp->tg3_flags2 & TG3_FLG2_MII_SERDES))
1944 tg3_phy_apply_otp(tp);
1946 if (tp->tg3_flags3 & TG3_FLG3_PHY_ENABLE_APD)
1947 tg3_phy_toggle_apd(tp, true);
1949 tg3_phy_toggle_apd(tp, false);
1952 if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
1953 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1954 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1955 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
1956 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1957 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
1958 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1960 if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
1961 tg3_writephy(tp, 0x1c, 0x8d68);
1962 tg3_writephy(tp, 0x1c, 0x8d68);
1964 if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
1965 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1966 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1967 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
1968 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1969 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
1970 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
1971 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
1972 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1974 else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
1975 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1976 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1977 if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) {
1978 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
1979 tg3_writephy(tp, MII_TG3_TEST1,
1980 MII_TG3_TEST1_TRIM_EN | 0x4);
1982 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
1983 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1985 /* Set Extended packet length bit (bit 14) on all chips that */
1986 /* support jumbo frames */
1987 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1988 /* Cannot do read-modify-write on 5401 */
1989 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1990 } else if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
1993 /* Set bit 14 with read-modify-write to preserve other bits */
1994 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
1995 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
1996 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
1999 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2000 * jumbo frames transmission.
 */
2002 if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
2005 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
2006 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2007 phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2010 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2011 /* adjust output voltage */
2012 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2015 tg3_phy_toggle_automdix(tp, 1);
2016 tg3_phy_set_wirespeed(tp);
2020 static void tg3_frob_aux_power(struct tg3 *tp)
2022 struct tg3 *tp_peer = tp;
2024 /* The GPIOs do something completely different on 57765. */
2025 if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0 ||
2026 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
2029 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2030 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
2031 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
2032 struct net_device *dev_peer;
2034 dev_peer = pci_get_drvdata(tp->pdev_peer);
2035 /* remove_one() may have been run on the peer. */
2039 tp_peer = netdev_priv(dev_peer);
2042 if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
2043 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
2044 (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
2045 (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
2046 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2047 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2048 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2049 (GRC_LCLCTRL_GPIO_OE0 |
2050 GRC_LCLCTRL_GPIO_OE1 |
2051 GRC_LCLCTRL_GPIO_OE2 |
2052 GRC_LCLCTRL_GPIO_OUTPUT0 |
2053 GRC_LCLCTRL_GPIO_OUTPUT1),
2055 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2056 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2057 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2058 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2059 GRC_LCLCTRL_GPIO_OE1 |
2060 GRC_LCLCTRL_GPIO_OE2 |
2061 GRC_LCLCTRL_GPIO_OUTPUT0 |
2062 GRC_LCLCTRL_GPIO_OUTPUT1 |
2064 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
2066 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2067 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
2069 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2070 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
2073 u32 grc_local_ctrl = 0;
2075 if (tp_peer != tp &&
2076 (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
2079 /* Workaround to prevent overdrawing Amps. */
2080 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2082 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2083 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2084 grc_local_ctrl, 100);
2087 /* On 5753 and variants, GPIO2 cannot be used. */
2088 no_gpio2 = tp->nic_sram_data_cfg &
2089 NIC_SRAM_DATA_CFG_NO_GPIO2;
2091 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2092 GRC_LCLCTRL_GPIO_OE1 |
2093 GRC_LCLCTRL_GPIO_OE2 |
2094 GRC_LCLCTRL_GPIO_OUTPUT1 |
2095 GRC_LCLCTRL_GPIO_OUTPUT2;
2097 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2098 GRC_LCLCTRL_GPIO_OUTPUT2);
2100 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2101 grc_local_ctrl, 100);
2103 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2105 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2106 grc_local_ctrl, 100);
2109 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2110 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2111 grc_local_ctrl, 100);
2115 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
2116 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
2117 if (tp_peer != tp &&
2118 (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
2121 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2122 (GRC_LCLCTRL_GPIO_OE1 |
2123 GRC_LCLCTRL_GPIO_OUTPUT1), 100);
2125 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2126 GRC_LCLCTRL_GPIO_OE1, 100);
2128 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2129 (GRC_LCLCTRL_GPIO_OE1 |
2130 GRC_LCLCTRL_GPIO_OUTPUT1), 100);
2135 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2137 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2139 else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411) {
2140 if (speed != SPEED_10)
2142 } else if (speed == SPEED_10)
2148 static int tg3_setup_phy(struct tg3 *, int);
2150 #define RESET_KIND_SHUTDOWN 0
2151 #define RESET_KIND_INIT 1
2152 #define RESET_KIND_SUSPEND 2
2154 static void tg3_write_sig_post_reset(struct tg3 *, int);
2155 static int tg3_halt_cpu(struct tg3 *, u32);
2157 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2161 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
2162 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2163 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2164 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2167 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2168 tw32(SG_DIG_CTRL, sg_dig_ctrl);
2169 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2174 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2176 val = tr32(GRC_MISC_CFG);
2177 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2180 } else if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
2182 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2185 tg3_writephy(tp, MII_ADVERTISE, 0);
2186 tg3_writephy(tp, MII_BMCR,
2187 BMCR_ANENABLE | BMCR_ANRESTART);
2189 tg3_writephy(tp, MII_TG3_FET_TEST,
2190 phytest | MII_TG3_FET_SHADOW_EN);
2191 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2192 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2194 MII_TG3_FET_SHDW_AUXMODE4,
2197 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2200 } else if (do_low_power) {
2201 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2202 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2204 tg3_writephy(tp, MII_TG3_AUX_CTRL,
2205 MII_TG3_AUXCTL_SHDWSEL_PWRCTL |
2206 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2207 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2208 MII_TG3_AUXCTL_PCTL_VREG_11V);
    /* The PHY should not be powered down on some chips because
     * of bugs.
     */
2214 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2215 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2216 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2217 (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
2220 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2221 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2222 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2223 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2224 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2225 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2228 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2231 /* tp->lock is held. */
2232 static int tg3_nvram_lock(struct tg3 *tp)
2234 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
2237 if (tp->nvram_lock_cnt == 0) {
2238 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2239 for (i = 0; i < 8000; i++) {
                if (tr32(NVRAM_SWARB) & SWARB_GNT1)
                    break;
                udelay(20);
            }
            if (i == 8000) {
                tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
                return -ENODEV;
            }
        }
2249 tp->nvram_lock_cnt++;
2254 /* tp->lock is held. */
2255 static void tg3_nvram_unlock(struct tg3 *tp)
2257 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
2258 if (tp->nvram_lock_cnt > 0)
2259 tp->nvram_lock_cnt--;
2260 if (tp->nvram_lock_cnt == 0)
2261 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2265 /* tp->lock is held. */
2266 static void tg3_enable_nvram_access(struct tg3 *tp)
2268 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
2269 !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM)) {
2270 u32 nvaccess = tr32(NVRAM_ACCESS);
2272 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2276 /* tp->lock is held. */
2277 static void tg3_disable_nvram_access(struct tg3 *tp)
2279 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
2280 !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM)) {
2281 u32 nvaccess = tr32(NVRAM_ACCESS);
2283 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
2287 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
2288 u32 offset, u32 *val)
2293 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
2296 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
2297 EEPROM_ADDR_DEVID_MASK |
2299 tw32(GRC_EEPROM_ADDR,
2301 (0 << EEPROM_ADDR_DEVID_SHIFT) |
2302 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
2303 EEPROM_ADDR_ADDR_MASK) |
2304 EEPROM_ADDR_READ | EEPROM_ADDR_START);
2306 for (i = 0; i < 1000; i++) {
2307 tmp = tr32(GRC_EEPROM_ADDR);
2309 if (tmp & EEPROM_ADDR_COMPLETE)
2313 if (!(tmp & EEPROM_ADDR_COMPLETE))
    tmp = tr32(GRC_EEPROM_DATA);

    /*
     * The data will always be opposite the native endian
     * format.  Perform a blind byteswap to compensate.
     */
    *val = swab32(tmp);
2327 #define NVRAM_CMD_TIMEOUT 10000
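/* Note added for clarity (not in the original source): each iteration of
 * the polling loop in tg3_nvram_exec_cmd() below is paced by a ~10 usec
 * delay, so NVRAM_CMD_TIMEOUT of 10000 iterations bounds a single NVRAM
 * command at roughly 100 ms of busy-waiting before the function gives up
 * and returns -EBUSY.
 */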
2329 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
2333 tw32(NVRAM_CMD, nvram_cmd);
2334 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
        udelay(10);
        if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
            udelay(10);
            break;
        }
    }
    if (i == NVRAM_CMD_TIMEOUT)
        return -EBUSY;

    return 0;
}
2348 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
2350 if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
2351 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
2352 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
2353 !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
2354 (tp->nvram_jedecnum == JEDEC_ATMEL))
2356 addr = ((addr / tp->nvram_pagesize) <<
2357 ATMEL_AT45DB0X1B_PAGE_POS) +
               (addr % tp->nvram_pagesize);

    return addr;
}
2363 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
2365 if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
2366 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
2367 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
2368 !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
2369 (tp->nvram_jedecnum == JEDEC_ATMEL))
2371 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
2372 tp->nvram_pagesize) +
               (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));

    return addr;
}
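/* Illustrative sketch, added for clarity and not part of the original
 * driver: the two translation helpers above are inverses of each other.
 * Assuming an Atmel AT45DB0x1B-style part with a 264-byte page and
 * ATMEL_AT45DB0X1B_PAGE_POS of 9, a linear offset of 1000 lives in page 3
 * at byte 208, so the physical address is (3 << 9) + 208 = 1744, and
 * translating back yields 1000 again.  The hypothetical helper below only
 * exercises the routines above.
 */
static inline int tg3_nvram_addr_xlat_roundtrip_ok(struct tg3 *tp, u32 addr)
{
    return tg3_nvram_logical_addr(tp, tg3_nvram_phys_addr(tp, addr)) == addr;
}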
2378 /* NOTE: Data read in from NVRAM is byteswapped according to
2379 * the byteswapping settings for all other register accesses.
2380 * tg3 devices are BE devices, so on a BE machine, the data
2381 * returned will be exactly as it is seen in NVRAM. On a LE
 * machine, the 32-bit value will be byteswapped.
 */
2384 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
2388 if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
2389 return tg3_nvram_read_using_eeprom(tp, offset, val);
2391 offset = tg3_nvram_phys_addr(tp, offset);
2393 if (offset > NVRAM_ADDR_MSK)
2396 ret = tg3_nvram_lock(tp);
2400 tg3_enable_nvram_access(tp);
2402 tw32(NVRAM_ADDR, offset);
2403 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
2404 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
2407 *val = tr32(NVRAM_RDDATA);
2409 tg3_disable_nvram_access(tp);
2411 tg3_nvram_unlock(tp);
/* Ensures NVRAM data is in bytestream format. */
static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
{
    u32 v;
    int res = tg3_nvram_read(tp, offset, &v);
    if (!res)
        *val = cpu_to_be32(v);
    return res;
}
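/* Minimal usage sketch, added for illustration and not present in the
 * original file: tg3_nvram_read() returns the word in host order, which
 * is what callers use when validating the NVRAM signature, while
 * tg3_nvram_read_be32() is meant for byte-stream data such as firmware
 * images.  The hypothetical helper below assumes TG3_EEPROM_MAGIC from
 * tg3.h and simply reports whether the signature word at offset 0 looks
 * sane.
 */
static inline int tg3_nvram_magic_ok_sketch(struct tg3 *tp)
{
    u32 magic;

    if (tg3_nvram_read(tp, 0, &magic) != 0)
        return 0;   /* read failed; treat as not OK */
    return magic == TG3_EEPROM_MAGIC;
}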
2426 /* tp->lock is held. */
2427 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
2429 u32 addr_high, addr_low;
2432 addr_high = ((tp->dev->dev_addr[0] << 8) |
2433 tp->dev->dev_addr[1]);
2434 addr_low = ((tp->dev->dev_addr[2] << 24) |
2435 (tp->dev->dev_addr[3] << 16) |
2436 (tp->dev->dev_addr[4] << 8) |
2437 (tp->dev->dev_addr[5] << 0));
2438 for (i = 0; i < 4; i++) {
2439 if (i == 1 && skip_mac_1)
2441 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
2442 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
2445 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2446 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2447 for (i = 0; i < 12; i++) {
2448 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
2449 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
2453 addr_high = (tp->dev->dev_addr[0] +
2454 tp->dev->dev_addr[1] +
2455 tp->dev->dev_addr[2] +
2456 tp->dev->dev_addr[3] +
2457 tp->dev->dev_addr[4] +
2458 tp->dev->dev_addr[5]) &
2459 TX_BACKOFF_SEED_MASK;
2460 tw32(MAC_TX_BACKOFF_SEED, addr_high);
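/* Worked example, added for clarity (not in the original source): for a
 * station address of 00:10:18:aa:bb:cc the packing above produces
 * MAC_ADDR_x_HIGH = 0x00000010 (bytes 0-1) and
 * MAC_ADDR_x_LOW  = 0x18aabbcc (bytes 2-5), and the backoff seed is the
 * byte sum (0x00 + 0x10 + 0x18 + 0xaa + 0xbb + 0xcc) masked with
 * TX_BACKOFF_SEED_MASK.
 */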
2463 static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
2466 bool device_should_wake, do_low_power;
2468 /* Make sure register accesses (indirect or otherwise)
     * will function correctly.
     */
2471 pci_write_config_dword(tp->pdev,
2472 TG3PCI_MISC_HOST_CTRL,
2473 tp->misc_host_ctrl);
2477 pci_enable_wake(tp->pdev, state, false);
2478 pci_set_power_state(tp->pdev, PCI_D0);
2480 /* Switch out of Vaux if it is a NIC */
2481 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
2482 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
2492 printk(KERN_ERR PFX "%s: Invalid power state (D%d) requested\n",
2493 tp->dev->name, state);
2497 /* Restore the CLKREQ setting. */
2498 if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG) {
2501 pci_read_config_word(tp->pdev,
2502 tp->pcie_cap + PCI_EXP_LNKCTL,
2504 lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
2505 pci_write_config_word(tp->pdev,
2506 tp->pcie_cap + PCI_EXP_LNKCTL,
2510 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
2511 tw32(TG3PCI_MISC_HOST_CTRL,
2512 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
2514 device_should_wake = pci_pme_capable(tp->pdev, state) &&
2515 device_may_wakeup(&tp->pdev->dev) &&
2516 (tp->tg3_flags & TG3_FLAG_WOL_ENABLE);
2518 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
2519 do_low_power = false;
2520 if ((tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) &&
2521 !tp->link_config.phy_is_low_power) {
2522 struct phy_device *phydev;
2523 u32 phyid, advertising;
2525 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2527 tp->link_config.phy_is_low_power = 1;
2529 tp->link_config.orig_speed = phydev->speed;
2530 tp->link_config.orig_duplex = phydev->duplex;
2531 tp->link_config.orig_autoneg = phydev->autoneg;
2532 tp->link_config.orig_advertising = phydev->advertising;
2534 advertising = ADVERTISED_TP |
2536 ADVERTISED_Autoneg |
2537 ADVERTISED_10baseT_Half;
2539 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
2540 device_should_wake) {
2541 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
2543 ADVERTISED_100baseT_Half |
2544 ADVERTISED_100baseT_Full |
2545 ADVERTISED_10baseT_Full;
2547 advertising |= ADVERTISED_10baseT_Full;
2550 phydev->advertising = advertising;
2552 phy_start_aneg(phydev);
2554 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
2555 if (phyid != TG3_PHY_ID_BCMAC131) {
2556 phyid &= TG3_PHY_OUI_MASK;
2557 if (phyid == TG3_PHY_OUI_1 ||
2558 phyid == TG3_PHY_OUI_2 ||
2559 phyid == TG3_PHY_OUI_3)
2560 do_low_power = true;
2564 do_low_power = true;
2566 if (tp->link_config.phy_is_low_power == 0) {
2567 tp->link_config.phy_is_low_power = 1;
2568 tp->link_config.orig_speed = tp->link_config.speed;
2569 tp->link_config.orig_duplex = tp->link_config.duplex;
2570 tp->link_config.orig_autoneg = tp->link_config.autoneg;
2573 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
2574 tp->link_config.speed = SPEED_10;
2575 tp->link_config.duplex = DUPLEX_HALF;
2576 tp->link_config.autoneg = AUTONEG_ENABLE;
2577 tg3_setup_phy(tp, 0);
2581 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2584 val = tr32(GRC_VCPU_EXT_CTRL);
2585 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
2586 } else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
2590 for (i = 0; i < 200; i++) {
2591 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
2592 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
2597 if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
2598 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
2599 WOL_DRV_STATE_SHUTDOWN |
2603 if (device_should_wake) {
2606 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
2608 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
2612 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
2613 mac_mode = MAC_MODE_PORT_MODE_GMII;
2615 mac_mode = MAC_MODE_PORT_MODE_MII;
2617 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
2618 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2620 u32 speed = (tp->tg3_flags &
2621 TG3_FLAG_WOL_SPEED_100MB) ?
2622 SPEED_100 : SPEED_10;
2623 if (tg3_5700_link_polarity(tp, speed))
2624 mac_mode |= MAC_MODE_LINK_POLARITY;
2626 mac_mode &= ~MAC_MODE_LINK_POLARITY;
2629 mac_mode = MAC_MODE_PORT_MODE_TBI;
2632 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
2633 tw32(MAC_LED_CTRL, tp->led_ctrl);
2635 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
2636 if (((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
2637 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) &&
2638 ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
2639 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)))
2640 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
2642 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
2643 mac_mode |= tp->mac_mode &
2644 (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
2645 if (mac_mode & MAC_MODE_APE_TX_EN)
2646 mac_mode |= MAC_MODE_TDE_ENABLE;
2649 tw32_f(MAC_MODE, mac_mode);
2652 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
2656 if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
2657 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2658 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
2661 base_val = tp->pci_clock_ctrl;
2662 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
2663 CLOCK_CTRL_TXCLK_DISABLE);
2665 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
2666 CLOCK_CTRL_PWRDOWN_PLL133, 40);
2667 } else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
2668 (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
2669 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
2671 } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
2672 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
2673 u32 newbits1, newbits2;
2675 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2676 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2677 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
2678 CLOCK_CTRL_TXCLK_DISABLE |
2680 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2681 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
2682 newbits1 = CLOCK_CTRL_625_CORE;
2683 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
2685 newbits1 = CLOCK_CTRL_ALTCLK;
2686 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2689 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
2692 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
2695 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2698 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2699 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2700 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
2701 CLOCK_CTRL_TXCLK_DISABLE |
2702 CLOCK_CTRL_44MHZ_CORE);
2704 newbits3 = CLOCK_CTRL_44MHZ_CORE;
2707 tw32_wait_f(TG3PCI_CLOCK_CTRL,
2708 tp->pci_clock_ctrl | newbits3, 40);
2712 if (!(device_should_wake) &&
2713 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
2714 tg3_power_down_phy(tp, do_low_power);
2716 tg3_frob_aux_power(tp);
2718 /* Workaround for unstable PLL clock */
2719 if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
2720 (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
2721 u32 val = tr32(0x7d00);
        val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
        tw32(0x7d00, val);
2725 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
2728 err = tg3_nvram_lock(tp);
2729 tg3_halt_cpu(tp, RX_CPU_BASE);
2731 tg3_nvram_unlock(tp);
2735 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
2737 if (device_should_wake)
2738 pci_enable_wake(tp->pdev, state, true);
2740 /* Finally, set the new power state. */
2741 pci_set_power_state(tp->pdev, state);
2746 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
    switch (val & MII_TG3_AUX_STAT_SPDMASK) {
    case MII_TG3_AUX_STAT_10HALF:
        *speed = SPEED_10;
        *duplex = DUPLEX_HALF;
        break;

    case MII_TG3_AUX_STAT_10FULL:
        *speed = SPEED_10;
        *duplex = DUPLEX_FULL;
        break;

    case MII_TG3_AUX_STAT_100HALF:
        *speed = SPEED_100;
        *duplex = DUPLEX_HALF;
        break;

    case MII_TG3_AUX_STAT_100FULL:
        *speed = SPEED_100;
        *duplex = DUPLEX_FULL;
        break;

    case MII_TG3_AUX_STAT_1000HALF:
        *speed = SPEED_1000;
        *duplex = DUPLEX_HALF;
        break;

    case MII_TG3_AUX_STAT_1000FULL:
        *speed = SPEED_1000;
        *duplex = DUPLEX_FULL;
        break;

    default:
        if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
            *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
                     SPEED_10;
            *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
                      DUPLEX_HALF;
            break;
        }
        *speed = SPEED_INVALID;
        *duplex = DUPLEX_INVALID;
        break;
    }
}
2793 static void tg3_phy_copper_begin(struct tg3 *tp)
2798 if (tp->link_config.phy_is_low_power) {
2799 /* Entering low power mode. Disable gigabit and
         * 100baseT advertisements.
         */
2802 tg3_writephy(tp, MII_TG3_CTRL, 0);
2804 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
2805 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
2806 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
2807 new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
2809 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2810 } else if (tp->link_config.speed == SPEED_INVALID) {
2811 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
2812 tp->link_config.advertising &=
2813 ~(ADVERTISED_1000baseT_Half |
2814 ADVERTISED_1000baseT_Full);
2816 new_adv = ADVERTISE_CSMA;
2817 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
2818 new_adv |= ADVERTISE_10HALF;
2819 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
2820 new_adv |= ADVERTISE_10FULL;
2821 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
2822 new_adv |= ADVERTISE_100HALF;
2823 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
2824 new_adv |= ADVERTISE_100FULL;
2826 new_adv |= tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2828 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2830 if (tp->link_config.advertising &
2831 (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
2833 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
2834 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
2835 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
2836 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
2837 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
2838 (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2839 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
2840 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2841 MII_TG3_CTRL_ENABLE_AS_MASTER);
2842 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
2844 tg3_writephy(tp, MII_TG3_CTRL, 0);
2847 new_adv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2848 new_adv |= ADVERTISE_CSMA;
2850 /* Asking for a specific link mode. */
2851 if (tp->link_config.speed == SPEED_1000) {
2852 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2854 if (tp->link_config.duplex == DUPLEX_FULL)
2855 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
2857 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
2858 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2859 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
2860 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2861 MII_TG3_CTRL_ENABLE_AS_MASTER);
2863 if (tp->link_config.speed == SPEED_100) {
2864 if (tp->link_config.duplex == DUPLEX_FULL)
2865 new_adv |= ADVERTISE_100FULL;
2867 new_adv |= ADVERTISE_100HALF;
2869 if (tp->link_config.duplex == DUPLEX_FULL)
2870 new_adv |= ADVERTISE_10FULL;
2872 new_adv |= ADVERTISE_10HALF;
2874 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2879 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
2882 if (tp->link_config.autoneg == AUTONEG_DISABLE &&
2883 tp->link_config.speed != SPEED_INVALID) {
2884 u32 bmcr, orig_bmcr;
2886 tp->link_config.active_speed = tp->link_config.speed;
2887 tp->link_config.active_duplex = tp->link_config.duplex;
        bmcr = 0;
        switch (tp->link_config.speed) {
        default:
        case SPEED_10:
            break;

        case SPEED_100:
            bmcr |= BMCR_SPEED100;
            break;

        case SPEED_1000:
            bmcr |= TG3_BMCR_SPEED1000;
            break;
        }
2904 if (tp->link_config.duplex == DUPLEX_FULL)
2905 bmcr |= BMCR_FULLDPLX;
2907 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
2908 (bmcr != orig_bmcr)) {
2909 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
2910 for (i = 0; i < 1500; i++) {
2914 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
2915 tg3_readphy(tp, MII_BMSR, &tmp))
2917 if (!(tmp & BMSR_LSTATUS)) {
2922 tg3_writephy(tp, MII_BMCR, bmcr);
2926 tg3_writephy(tp, MII_BMCR,
2927 BMCR_ANENABLE | BMCR_ANRESTART);
2931 static int tg3_init_5401phy_dsp(struct tg3 *tp)
2935 /* Turn off tap power management. */
2936 /* Set Extended packet length bit */
2937 err = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
2939 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
2940 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
2942 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
2943 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
2945 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2946 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
2948 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2949 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
2951 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
    err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);

    udelay(40);

    return err;
}
2959 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
2961 u32 adv_reg, all_mask = 0;
2963 if (mask & ADVERTISED_10baseT_Half)
2964 all_mask |= ADVERTISE_10HALF;
2965 if (mask & ADVERTISED_10baseT_Full)
2966 all_mask |= ADVERTISE_10FULL;
2967 if (mask & ADVERTISED_100baseT_Half)
2968 all_mask |= ADVERTISE_100HALF;
2969 if (mask & ADVERTISED_100baseT_Full)
2970 all_mask |= ADVERTISE_100FULL;
2972 if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
2975 if ((adv_reg & all_mask) != all_mask)
2977 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
2981 if (mask & ADVERTISED_1000baseT_Half)
2982 all_mask |= ADVERTISE_1000HALF;
2983 if (mask & ADVERTISED_1000baseT_Full)
2984 all_mask |= ADVERTISE_1000FULL;
2986 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
2989 if ((tg3_ctrl & all_mask) != all_mask)
2995 static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
2999 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
3002 curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
3003 reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
3005 if (tp->link_config.active_duplex == DUPLEX_FULL) {
3006 if (curadv != reqadv)
3009 if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)
3010 tg3_readphy(tp, MII_LPA, rmtadv);
3012 /* Reprogram the advertisement register, even if it
3013 * does not affect the current link. If the link
3014 * gets renegotiated in the future, we can save an
3015 * additional renegotiation cycle by advertising
         * it correctly in the first place.
         */
3018 if (curadv != reqadv) {
3019 *lcladv &= ~(ADVERTISE_PAUSE_CAP |
3020 ADVERTISE_PAUSE_ASYM);
3021 tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
3028 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
3030 int current_link_up;
3032 u32 lcl_adv, rmt_adv;
3040 (MAC_STATUS_SYNC_CHANGED |
3041 MAC_STATUS_CFG_CHANGED |
3042 MAC_STATUS_MI_COMPLETION |
3043 MAC_STATUS_LNKSTATE_CHANGED));
3046 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
3048 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
3052 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
    /* Some third-party PHYs need to be reset on link going
     * down.
     */
3057 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3058 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
3059 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
3060 netif_carrier_ok(tp->dev)) {
3061 tg3_readphy(tp, MII_BMSR, &bmsr);
3062 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3063 !(bmsr & BMSR_LSTATUS))
3069 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
3070 tg3_readphy(tp, MII_BMSR, &bmsr);
3071 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
3072 !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
3075 if (!(bmsr & BMSR_LSTATUS)) {
3076 err = tg3_init_5401phy_dsp(tp);
3080 tg3_readphy(tp, MII_BMSR, &bmsr);
3081 for (i = 0; i < 1000; i++) {
3083 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3084 (bmsr & BMSR_LSTATUS)) {
3090 if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
3091 !(bmsr & BMSR_LSTATUS) &&
3092 tp->link_config.active_speed == SPEED_1000) {
3093 err = tg3_phy_reset(tp);
3095 err = tg3_init_5401phy_dsp(tp);
3100 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3101 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
3102 /* 5701 {A0,B0} CRC bug workaround */
3103 tg3_writephy(tp, 0x15, 0x0a75);
3104 tg3_writephy(tp, 0x1c, 0x8c68);
3105 tg3_writephy(tp, 0x1c, 0x8d68);
3106 tg3_writephy(tp, 0x1c, 0x8c68);
3109 /* Clear pending interrupts... */
3110 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
3111 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
3113 if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
3114 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
3115 else if (!(tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET))
3116 tg3_writephy(tp, MII_TG3_IMASK, ~0);
3118 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3119 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3120 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
3121 tg3_writephy(tp, MII_TG3_EXT_CTRL,
3122 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
3124 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
3127 current_link_up = 0;
3128 current_speed = SPEED_INVALID;
3129 current_duplex = DUPLEX_INVALID;
3131 if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
3134 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
3135 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
3136 if (!(val & (1 << 10))) {
3138 tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
3144 for (i = 0; i < 100; i++) {
3145 tg3_readphy(tp, MII_BMSR, &bmsr);
3146 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3147 (bmsr & BMSR_LSTATUS))
3152 if (bmsr & BMSR_LSTATUS) {
3155 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
3156 for (i = 0; i < 2000; i++) {
3158 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
3163 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
3168 for (i = 0; i < 200; i++) {
3169 tg3_readphy(tp, MII_BMCR, &bmcr);
3170 if (tg3_readphy(tp, MII_BMCR, &bmcr))
3172 if (bmcr && bmcr != 0x7fff)
3180 tp->link_config.active_speed = current_speed;
3181 tp->link_config.active_duplex = current_duplex;
3183 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3184 if ((bmcr & BMCR_ANENABLE) &&
3185 tg3_copper_is_advertising_all(tp,
3186 tp->link_config.advertising)) {
3187 if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
3189 current_link_up = 1;
3192 if (!(bmcr & BMCR_ANENABLE) &&
3193 tp->link_config.speed == current_speed &&
3194 tp->link_config.duplex == current_duplex &&
3195 tp->link_config.flowctrl ==
3196 tp->link_config.active_flowctrl) {
3197 current_link_up = 1;
3201 if (current_link_up == 1 &&
3202 tp->link_config.active_duplex == DUPLEX_FULL)
3203 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
3207 if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
3210 tg3_phy_copper_begin(tp);
3212 tg3_readphy(tp, MII_BMSR, &tmp);
3213 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
3214 (tmp & BMSR_LSTATUS))
3215 current_link_up = 1;
3218 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
3219 if (current_link_up == 1) {
3220 if (tp->link_config.active_speed == SPEED_100 ||
3221 tp->link_config.active_speed == SPEED_10)
3222 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3224 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3225 } else if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET)
3226 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3228 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3230 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
3231 if (tp->link_config.active_duplex == DUPLEX_HALF)
3232 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
3234 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
3235 if (current_link_up == 1 &&
3236 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
3237 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
3239 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
3242 /* ??? Without this setting Netgear GA302T PHY does not
     * ??? send/receive packets...
     */
3245 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
3246 tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
3247 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
3248 tw32_f(MAC_MI_MODE, tp->mi_mode);
3252 tw32_f(MAC_MODE, tp->mac_mode);
3255 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
3256 /* Polled via timer. */
3257 tw32_f(MAC_EVENT, 0);
3259 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3263 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
3264 current_link_up == 1 &&
3265 tp->link_config.active_speed == SPEED_1000 &&
3266 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
3267 (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
3270 (MAC_STATUS_SYNC_CHANGED |
3271 MAC_STATUS_CFG_CHANGED));
3274 NIC_SRAM_FIRMWARE_MBOX,
3275 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
3278 /* Prevent send BD corruption. */
3279 if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG) {
3280 u16 oldlnkctl, newlnkctl;
3282 pci_read_config_word(tp->pdev,
3283 tp->pcie_cap + PCI_EXP_LNKCTL,
3285 if (tp->link_config.active_speed == SPEED_100 ||
3286 tp->link_config.active_speed == SPEED_10)
3287 newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
3289 newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
3290 if (newlnkctl != oldlnkctl)
3291 pci_write_config_word(tp->pdev,
3292 tp->pcie_cap + PCI_EXP_LNKCTL,
3296 if (current_link_up != netif_carrier_ok(tp->dev)) {
3297 if (current_link_up)
3298 netif_carrier_on(tp->dev);
3300 netif_carrier_off(tp->dev);
3301 tg3_link_report(tp);
struct tg3_fiber_aneginfo {
    int state;
3309 #define ANEG_STATE_UNKNOWN 0
3310 #define ANEG_STATE_AN_ENABLE 1
3311 #define ANEG_STATE_RESTART_INIT 2
3312 #define ANEG_STATE_RESTART 3
3313 #define ANEG_STATE_DISABLE_LINK_OK 4
3314 #define ANEG_STATE_ABILITY_DETECT_INIT 5
3315 #define ANEG_STATE_ABILITY_DETECT 6
3316 #define ANEG_STATE_ACK_DETECT_INIT 7
3317 #define ANEG_STATE_ACK_DETECT 8
3318 #define ANEG_STATE_COMPLETE_ACK_INIT 9
3319 #define ANEG_STATE_COMPLETE_ACK 10
3320 #define ANEG_STATE_IDLE_DETECT_INIT 11
3321 #define ANEG_STATE_IDLE_DETECT 12
3322 #define ANEG_STATE_LINK_OK 13
3323 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
3324 #define ANEG_STATE_NEXT_PAGE_WAIT 15
    u32 flags;
#define MR_AN_ENABLE		0x00000001
3328 #define MR_RESTART_AN 0x00000002
3329 #define MR_AN_COMPLETE 0x00000004
3330 #define MR_PAGE_RX 0x00000008
3331 #define MR_NP_LOADED 0x00000010
3332 #define MR_TOGGLE_TX 0x00000020
3333 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
3334 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
3335 #define MR_LP_ADV_SYM_PAUSE 0x00000100
3336 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
3337 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
3338 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
3339 #define MR_LP_ADV_NEXT_PAGE 0x00001000
3340 #define MR_TOGGLE_RX 0x00002000
3341 #define MR_NP_RX 0x00004000
3343 #define MR_LINK_OK 0x80000000
3345 unsigned long link_time, cur_time;
3347 u32 ability_match_cfg;
3348 int ability_match_count;
3350 char ability_match, idle_match, ack_match;
3352 u32 txconfig, rxconfig;
3353 #define ANEG_CFG_NP 0x00000080
3354 #define ANEG_CFG_ACK 0x00000040
3355 #define ANEG_CFG_RF2 0x00000020
3356 #define ANEG_CFG_RF1 0x00000010
3357 #define ANEG_CFG_PS2 0x00000001
3358 #define ANEG_CFG_PS1 0x00008000
3359 #define ANEG_CFG_HD 0x00004000
3360 #define ANEG_CFG_FD 0x00002000
#define ANEG_CFG_INVAL		0x00001f06
};
#define ANEG_OK			0
#define ANEG_DONE		1
#define ANEG_TIMER_ENAB		2
3367 #define ANEG_FAILED -1
3369 #define ANEG_STATE_SETTLE_TIME 10000
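/* Summary added for clarity (not in the original source): on the happy
 * path the software autoneg state machine below walks
 * AN_ENABLE -> RESTART_INIT -> RESTART -> ABILITY_DETECT_INIT ->
 * ABILITY_DETECT -> ACK_DETECT_INIT -> ACK_DETECT ->
 * COMPLETE_ACK_INIT -> COMPLETE_ACK -> IDLE_DETECT_INIT ->
 * IDLE_DETECT -> LINK_OK, returning ANEG_TIMER_ENAB from the states that
 * must wait ANEG_STATE_SETTLE_TIME before being re-entered, and reporting
 * ANEG_DONE once MR_AN_COMPLETE and MR_LINK_OK are set.
 */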
3371 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
3372 struct tg3_fiber_aneginfo *ap)
3375 unsigned long delta;
3379 if (ap->state == ANEG_STATE_UNKNOWN) {
3383 ap->ability_match_cfg = 0;
3384 ap->ability_match_count = 0;
3385 ap->ability_match = 0;
3391 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
3392 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
3394 if (rx_cfg_reg != ap->ability_match_cfg) {
3395 ap->ability_match_cfg = rx_cfg_reg;
3396 ap->ability_match = 0;
3397 ap->ability_match_count = 0;
3399 if (++ap->ability_match_count > 1) {
3400 ap->ability_match = 1;
3401 ap->ability_match_cfg = rx_cfg_reg;
3404 if (rx_cfg_reg & ANEG_CFG_ACK)
3412 ap->ability_match_cfg = 0;
3413 ap->ability_match_count = 0;
3414 ap->ability_match = 0;
3420 ap->rxconfig = rx_cfg_reg;
3424 case ANEG_STATE_UNKNOWN:
3425 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
3426 ap->state = ANEG_STATE_AN_ENABLE;
3429 case ANEG_STATE_AN_ENABLE:
3430 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
3431 if (ap->flags & MR_AN_ENABLE) {
3434 ap->ability_match_cfg = 0;
3435 ap->ability_match_count = 0;
3436 ap->ability_match = 0;
3440 ap->state = ANEG_STATE_RESTART_INIT;
3442 ap->state = ANEG_STATE_DISABLE_LINK_OK;
3446 case ANEG_STATE_RESTART_INIT:
3447 ap->link_time = ap->cur_time;
3448 ap->flags &= ~(MR_NP_LOADED);
3450 tw32(MAC_TX_AUTO_NEG, 0);
3451 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3452 tw32_f(MAC_MODE, tp->mac_mode);
3455 ret = ANEG_TIMER_ENAB;
3456 ap->state = ANEG_STATE_RESTART;
3459 case ANEG_STATE_RESTART:
3460 delta = ap->cur_time - ap->link_time;
3461 if (delta > ANEG_STATE_SETTLE_TIME) {
3462 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
3464 ret = ANEG_TIMER_ENAB;
3468 case ANEG_STATE_DISABLE_LINK_OK:
3472 case ANEG_STATE_ABILITY_DETECT_INIT:
3473 ap->flags &= ~(MR_TOGGLE_TX);
3474 ap->txconfig = ANEG_CFG_FD;
3475 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3476 if (flowctrl & ADVERTISE_1000XPAUSE)
3477 ap->txconfig |= ANEG_CFG_PS1;
3478 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3479 ap->txconfig |= ANEG_CFG_PS2;
3480 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3481 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3482 tw32_f(MAC_MODE, tp->mac_mode);
3485 ap->state = ANEG_STATE_ABILITY_DETECT;
3488 case ANEG_STATE_ABILITY_DETECT:
3489 if (ap->ability_match != 0 && ap->rxconfig != 0) {
3490 ap->state = ANEG_STATE_ACK_DETECT_INIT;
3494 case ANEG_STATE_ACK_DETECT_INIT:
3495 ap->txconfig |= ANEG_CFG_ACK;
3496 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3497 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3498 tw32_f(MAC_MODE, tp->mac_mode);
3501 ap->state = ANEG_STATE_ACK_DETECT;
3504 case ANEG_STATE_ACK_DETECT:
3505 if (ap->ack_match != 0) {
3506 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
3507 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
3508 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
3510 ap->state = ANEG_STATE_AN_ENABLE;
3512 } else if (ap->ability_match != 0 &&
3513 ap->rxconfig == 0) {
3514 ap->state = ANEG_STATE_AN_ENABLE;
3518 case ANEG_STATE_COMPLETE_ACK_INIT:
3519 if (ap->rxconfig & ANEG_CFG_INVAL) {
3523 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
3524 MR_LP_ADV_HALF_DUPLEX |
3525 MR_LP_ADV_SYM_PAUSE |
3526 MR_LP_ADV_ASYM_PAUSE |
3527 MR_LP_ADV_REMOTE_FAULT1 |
3528 MR_LP_ADV_REMOTE_FAULT2 |
3529 MR_LP_ADV_NEXT_PAGE |
3532 if (ap->rxconfig & ANEG_CFG_FD)
3533 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
3534 if (ap->rxconfig & ANEG_CFG_HD)
3535 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
3536 if (ap->rxconfig & ANEG_CFG_PS1)
3537 ap->flags |= MR_LP_ADV_SYM_PAUSE;
3538 if (ap->rxconfig & ANEG_CFG_PS2)
3539 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
3540 if (ap->rxconfig & ANEG_CFG_RF1)
3541 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
3542 if (ap->rxconfig & ANEG_CFG_RF2)
3543 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
3544 if (ap->rxconfig & ANEG_CFG_NP)
3545 ap->flags |= MR_LP_ADV_NEXT_PAGE;
3547 ap->link_time = ap->cur_time;
3549 ap->flags ^= (MR_TOGGLE_TX);
3550 if (ap->rxconfig & 0x0008)
3551 ap->flags |= MR_TOGGLE_RX;
3552 if (ap->rxconfig & ANEG_CFG_NP)
3553 ap->flags |= MR_NP_RX;
3554 ap->flags |= MR_PAGE_RX;
3556 ap->state = ANEG_STATE_COMPLETE_ACK;
3557 ret = ANEG_TIMER_ENAB;
3560 case ANEG_STATE_COMPLETE_ACK:
3561 if (ap->ability_match != 0 &&
3562 ap->rxconfig == 0) {
3563 ap->state = ANEG_STATE_AN_ENABLE;
3566 delta = ap->cur_time - ap->link_time;
3567 if (delta > ANEG_STATE_SETTLE_TIME) {
3568 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
3569 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3571 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
3572 !(ap->flags & MR_NP_RX)) {
3573 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3581 case ANEG_STATE_IDLE_DETECT_INIT:
3582 ap->link_time = ap->cur_time;
3583 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3584 tw32_f(MAC_MODE, tp->mac_mode);
3587 ap->state = ANEG_STATE_IDLE_DETECT;
3588 ret = ANEG_TIMER_ENAB;
3591 case ANEG_STATE_IDLE_DETECT:
3592 if (ap->ability_match != 0 &&
3593 ap->rxconfig == 0) {
3594 ap->state = ANEG_STATE_AN_ENABLE;
3597 delta = ap->cur_time - ap->link_time;
3598 if (delta > ANEG_STATE_SETTLE_TIME) {
3599 /* XXX another gem from the Broadcom driver :( */
3600 ap->state = ANEG_STATE_LINK_OK;
3604 case ANEG_STATE_LINK_OK:
3605 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
3609 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
3610 /* ??? unimplemented */
3613 case ANEG_STATE_NEXT_PAGE_WAIT:
3614 /* ??? unimplemented */
3625 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
3628 struct tg3_fiber_aneginfo aninfo;
3629 int status = ANEG_FAILED;
3633 tw32_f(MAC_TX_AUTO_NEG, 0);
3635 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
3636 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
3639 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
3642 memset(&aninfo, 0, sizeof(aninfo));
3643 aninfo.flags |= MR_AN_ENABLE;
3644 aninfo.state = ANEG_STATE_UNKNOWN;
3645 aninfo.cur_time = 0;
3647 while (++tick < 195000) {
3648 status = tg3_fiber_aneg_smachine(tp, &aninfo);
3649 if (status == ANEG_DONE || status == ANEG_FAILED)
3655 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3656 tw32_f(MAC_MODE, tp->mac_mode);
3659 *txflags = aninfo.txconfig;
3660 *rxflags = aninfo.flags;
3662 if (status == ANEG_DONE &&
3663 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
3664 MR_LP_ADV_FULL_DUPLEX)))
3670 static void tg3_init_bcm8002(struct tg3 *tp)
3672 u32 mac_status = tr32(MAC_STATUS);
3675 /* Reset when initting first time or we have a link. */
3676 if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
        !(mac_status & MAC_STATUS_PCS_SYNCED))
        return;
3680 /* Set PLL lock range. */
3681 tg3_writephy(tp, 0x16, 0x8007);
3684 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
3686 /* Wait for reset to complete. */
3687 /* XXX schedule_timeout() ... */
3688 for (i = 0; i < 500; i++)
3691 /* Config mode; select PMA/Ch 1 regs. */
3692 tg3_writephy(tp, 0x10, 0x8411);
3694 /* Enable auto-lock and comdet, select txclk for tx. */
3695 tg3_writephy(tp, 0x11, 0x0a10);
3697 tg3_writephy(tp, 0x18, 0x00a0);
3698 tg3_writephy(tp, 0x16, 0x41ff);
3700 /* Assert and deassert POR. */
3701 tg3_writephy(tp, 0x13, 0x0400);
3703 tg3_writephy(tp, 0x13, 0x0000);
3705 tg3_writephy(tp, 0x11, 0x0a50);
3707 tg3_writephy(tp, 0x11, 0x0a10);
3709 /* Wait for signal to stabilize */
3710 /* XXX schedule_timeout() ... */
3711 for (i = 0; i < 15000; i++)
    /* Deselect the channel register so we can read the PHYID
     * later.
     */
3717 tg3_writephy(tp, 0x10, 0x8011);
3720 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
3723 u32 sg_dig_ctrl, sg_dig_status;
3724 u32 serdes_cfg, expected_sg_dig_ctrl;
3725 int workaround, port_a;
3726 int current_link_up;
3729 expected_sg_dig_ctrl = 0;
3732 current_link_up = 0;
3734 if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
3735 tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
3737 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
3740 /* preserve bits 0-11,13,14 for signal pre-emphasis */
3741 /* preserve bits 20-23 for voltage regulator */
3742 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
3745 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3747 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
3748 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
3750 u32 val = serdes_cfg;
3756 tw32_f(MAC_SERDES_CFG, val);
3759 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
3761 if (mac_status & MAC_STATUS_PCS_SYNCED) {
3762 tg3_setup_flow_control(tp, 0, 0);
3763 current_link_up = 1;
3768 /* Want auto-negotiation. */
3769 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
3771 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3772 if (flowctrl & ADVERTISE_1000XPAUSE)
3773 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
3774 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3775 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
3777 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
3778 if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
3779 tp->serdes_counter &&
3780 ((mac_status & (MAC_STATUS_PCS_SYNCED |
3781 MAC_STATUS_RCVD_CFG)) ==
3782 MAC_STATUS_PCS_SYNCED)) {
3783 tp->serdes_counter--;
3784 current_link_up = 1;
3789 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
3790 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
3792 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
3794 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3795 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3796 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
3797 MAC_STATUS_SIGNAL_DET)) {
3798 sg_dig_status = tr32(SG_DIG_STATUS);
3799 mac_status = tr32(MAC_STATUS);
3801 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
3802 (mac_status & MAC_STATUS_PCS_SYNCED)) {
3803 u32 local_adv = 0, remote_adv = 0;
3805 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
3806 local_adv |= ADVERTISE_1000XPAUSE;
3807 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
3808 local_adv |= ADVERTISE_1000XPSE_ASYM;
3810 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
3811 remote_adv |= LPA_1000XPAUSE;
3812 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
3813 remote_adv |= LPA_1000XPAUSE_ASYM;
3815 tg3_setup_flow_control(tp, local_adv, remote_adv);
3816 current_link_up = 1;
3817 tp->serdes_counter = 0;
3818 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3819 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
3820 if (tp->serdes_counter)
3821 tp->serdes_counter--;
3824 u32 val = serdes_cfg;
3831 tw32_f(MAC_SERDES_CFG, val);
3834 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
3837 /* Link parallel detection - link is up */
3838 /* only if we have PCS_SYNC and not */
3839 /* receiving config code words */
3840 mac_status = tr32(MAC_STATUS);
3841 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
3842 !(mac_status & MAC_STATUS_RCVD_CFG)) {
3843 tg3_setup_flow_control(tp, 0, 0);
3844 current_link_up = 1;
3846 TG3_FLG2_PARALLEL_DETECT;
3847 tp->serdes_counter =
3848 SERDES_PARALLEL_DET_TIMEOUT;
3850 goto restart_autoneg;
3854 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3855 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3859 return current_link_up;
3862 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
3864 int current_link_up = 0;
3866 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
3869 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3870 u32 txflags, rxflags;
3873 if (fiber_autoneg(tp, &txflags, &rxflags)) {
3874 u32 local_adv = 0, remote_adv = 0;
3876 if (txflags & ANEG_CFG_PS1)
3877 local_adv |= ADVERTISE_1000XPAUSE;
3878 if (txflags & ANEG_CFG_PS2)
3879 local_adv |= ADVERTISE_1000XPSE_ASYM;
3881 if (rxflags & MR_LP_ADV_SYM_PAUSE)
3882 remote_adv |= LPA_1000XPAUSE;
3883 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
3884 remote_adv |= LPA_1000XPAUSE_ASYM;
3886 tg3_setup_flow_control(tp, local_adv, remote_adv);
3888 current_link_up = 1;
3890 for (i = 0; i < 30; i++) {
3893 (MAC_STATUS_SYNC_CHANGED |
3894 MAC_STATUS_CFG_CHANGED));
3896 if ((tr32(MAC_STATUS) &
3897 (MAC_STATUS_SYNC_CHANGED |
3898 MAC_STATUS_CFG_CHANGED)) == 0)
3902 mac_status = tr32(MAC_STATUS);
3903 if (current_link_up == 0 &&
3904 (mac_status & MAC_STATUS_PCS_SYNCED) &&
3905 !(mac_status & MAC_STATUS_RCVD_CFG))
3906 current_link_up = 1;
3908 tg3_setup_flow_control(tp, 0, 0);
3910 /* Forcing 1000FD link up. */
3911 current_link_up = 1;
3913 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
3916 tw32_f(MAC_MODE, tp->mac_mode);
3921 return current_link_up;
3924 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
3927 u16 orig_active_speed;
3928 u8 orig_active_duplex;
3930 int current_link_up;
3933 orig_pause_cfg = tp->link_config.active_flowctrl;
3934 orig_active_speed = tp->link_config.active_speed;
3935 orig_active_duplex = tp->link_config.active_duplex;
3937 if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
3938 netif_carrier_ok(tp->dev) &&
3939 (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
3940 mac_status = tr32(MAC_STATUS);
3941 mac_status &= (MAC_STATUS_PCS_SYNCED |
3942 MAC_STATUS_SIGNAL_DET |
3943 MAC_STATUS_CFG_CHANGED |
3944 MAC_STATUS_RCVD_CFG);
3945 if (mac_status == (MAC_STATUS_PCS_SYNCED |
3946 MAC_STATUS_SIGNAL_DET)) {
3947 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
3948 MAC_STATUS_CFG_CHANGED));
3953 tw32_f(MAC_TX_AUTO_NEG, 0);
3955 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
3956 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
3957 tw32_f(MAC_MODE, tp->mac_mode);
3960 if (tp->phy_id == PHY_ID_BCM8002)
3961 tg3_init_bcm8002(tp);
3963 /* Enable link change event even when serdes polling. */
3964 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3967 current_link_up = 0;
3968 mac_status = tr32(MAC_STATUS);
3970 if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
3971 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
3973 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
3975 tp->napi[0].hw_status->status =
3976 (SD_STATUS_UPDATED |
3977 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
3979 for (i = 0; i < 100; i++) {
3980 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
3981 MAC_STATUS_CFG_CHANGED));
3983 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
3984 MAC_STATUS_CFG_CHANGED |
3985 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
3989 mac_status = tr32(MAC_STATUS);
3990 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
3991 current_link_up = 0;
3992 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
3993 tp->serdes_counter == 0) {
3994 tw32_f(MAC_MODE, (tp->mac_mode |
3995 MAC_MODE_SEND_CONFIGS));
3997 tw32_f(MAC_MODE, tp->mac_mode);
4001 if (current_link_up == 1) {
4002 tp->link_config.active_speed = SPEED_1000;
4003 tp->link_config.active_duplex = DUPLEX_FULL;
4004 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4005 LED_CTRL_LNKLED_OVERRIDE |
4006 LED_CTRL_1000MBPS_ON));
4008 tp->link_config.active_speed = SPEED_INVALID;
4009 tp->link_config.active_duplex = DUPLEX_INVALID;
4010 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4011 LED_CTRL_LNKLED_OVERRIDE |
4012 LED_CTRL_TRAFFIC_OVERRIDE));
4015 if (current_link_up != netif_carrier_ok(tp->dev)) {
4016 if (current_link_up)
4017 netif_carrier_on(tp->dev);
4019 netif_carrier_off(tp->dev);
4020 tg3_link_report(tp);
4022 u32 now_pause_cfg = tp->link_config.active_flowctrl;
4023 if (orig_pause_cfg != now_pause_cfg ||
4024 orig_active_speed != tp->link_config.active_speed ||
4025 orig_active_duplex != tp->link_config.active_duplex)
4026 tg3_link_report(tp);
4032 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
4034 int current_link_up, err = 0;
4038 u32 local_adv, remote_adv;
4040 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4041 tw32_f(MAC_MODE, tp->mac_mode);
4047 (MAC_STATUS_SYNC_CHANGED |
4048 MAC_STATUS_CFG_CHANGED |
4049 MAC_STATUS_MI_COMPLETION |
4050 MAC_STATUS_LNKSTATE_CHANGED));
4056 current_link_up = 0;
4057 current_speed = SPEED_INVALID;
4058 current_duplex = DUPLEX_INVALID;
4060 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4061 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4062 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
4063 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4064 bmsr |= BMSR_LSTATUS;
4066 bmsr &= ~BMSR_LSTATUS;
4069 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
4071 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
4072 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
4073 /* do nothing, just check for link up at the end */
4074 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4077 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4078 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
4079 ADVERTISE_1000XPAUSE |
4080 ADVERTISE_1000XPSE_ASYM |
4083 new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4085 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
4086 new_adv |= ADVERTISE_1000XHALF;
4087 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
4088 new_adv |= ADVERTISE_1000XFULL;
4090 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
4091 tg3_writephy(tp, MII_ADVERTISE, new_adv);
4092 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
4093 tg3_writephy(tp, MII_BMCR, bmcr);
4095 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4096 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
4097 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
4104 bmcr &= ~BMCR_SPEED1000;
4105 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
4107 if (tp->link_config.duplex == DUPLEX_FULL)
4108 new_bmcr |= BMCR_FULLDPLX;
4110 if (new_bmcr != bmcr) {
4111 /* BMCR_SPEED1000 is a reserved bit that needs
             * to be set on write.
             */
4114 new_bmcr |= BMCR_SPEED1000;
4116 /* Force a linkdown */
4117 if (netif_carrier_ok(tp->dev)) {
4120 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4121 adv &= ~(ADVERTISE_1000XFULL |
4122 ADVERTISE_1000XHALF |
4124 tg3_writephy(tp, MII_ADVERTISE, adv);
4125 tg3_writephy(tp, MII_BMCR, bmcr |
4129 netif_carrier_off(tp->dev);
4131 tg3_writephy(tp, MII_BMCR, new_bmcr);
4133 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4134 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4135 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
4137 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4138 bmsr |= BMSR_LSTATUS;
4140 bmsr &= ~BMSR_LSTATUS;
4142 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
4146 if (bmsr & BMSR_LSTATUS) {
4147 current_speed = SPEED_1000;
4148 current_link_up = 1;
4149 if (bmcr & BMCR_FULLDPLX)
4150 current_duplex = DUPLEX_FULL;
4152 current_duplex = DUPLEX_HALF;
4157 if (bmcr & BMCR_ANENABLE) {
4160 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
4161 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
4162 common = local_adv & remote_adv;
4163 if (common & (ADVERTISE_1000XHALF |
4164 ADVERTISE_1000XFULL)) {
4165 if (common & ADVERTISE_1000XFULL)
4166 current_duplex = DUPLEX_FULL;
4168 current_duplex = DUPLEX_HALF;
4171 current_link_up = 0;
4175 if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
4176 tg3_setup_flow_control(tp, local_adv, remote_adv);
4178 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4179 if (tp->link_config.active_duplex == DUPLEX_HALF)
4180 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4182 tw32_f(MAC_MODE, tp->mac_mode);
4185 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4187 tp->link_config.active_speed = current_speed;
4188 tp->link_config.active_duplex = current_duplex;
4190 if (current_link_up != netif_carrier_ok(tp->dev)) {
4191 if (current_link_up)
4192 netif_carrier_on(tp->dev);
4194 netif_carrier_off(tp->dev);
4195 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
4197 tg3_link_report(tp);
4202 static void tg3_serdes_parallel_detect(struct tg3 *tp)
4204 if (tp->serdes_counter) {
4205 /* Give autoneg time to complete. */
4206 tp->serdes_counter--;
4209 if (!netif_carrier_ok(tp->dev) &&
4210 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
4213 tg3_readphy(tp, MII_BMCR, &bmcr);
4214 if (bmcr & BMCR_ANENABLE) {
4217 /* Select shadow register 0x1f */
4218 tg3_writephy(tp, 0x1c, 0x7c00);
4219 tg3_readphy(tp, 0x1c, &phy1);
4221 /* Select expansion interrupt status register */
4222 tg3_writephy(tp, 0x17, 0x0f01);
4223 tg3_readphy(tp, 0x15, &phy2);
4224 tg3_readphy(tp, 0x15, &phy2);
4226 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
4227 /* We have signal detect and not receiving
                 * config code words, link is up by parallel
                 * detection.
                 */
4232 bmcr &= ~BMCR_ANENABLE;
4233 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
4234 tg3_writephy(tp, MII_BMCR, bmcr);
4235 tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
4239 else if (netif_carrier_ok(tp->dev) &&
4240 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
4241 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
4244 /* Select expansion interrupt status register */
4245 tg3_writephy(tp, 0x17, 0x0f01);
4246 tg3_readphy(tp, 0x15, &phy2);
4250 /* Config code words received, turn on autoneg. */
4251 tg3_readphy(tp, MII_BMCR, &bmcr);
4252 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
4254 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
4260 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
4264 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
4265 err = tg3_setup_fiber_phy(tp, force_reset);
4266 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
4267 err = tg3_setup_fiber_mii_phy(tp, force_reset);
4269 err = tg3_setup_copper_phy(tp, force_reset);
4272 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
4275 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
4276 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
4278 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
4283 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
4284 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
4285 tw32(GRC_MISC_CFG, val);
4288 if (tp->link_config.active_speed == SPEED_1000 &&
4289 tp->link_config.active_duplex == DUPLEX_HALF)
4290 tw32(MAC_TX_LENGTHS,
4291 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
4292 (6 << TX_LENGTHS_IPG_SHIFT) |
4293 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
4295 tw32(MAC_TX_LENGTHS,
4296 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
4297 (6 << TX_LENGTHS_IPG_SHIFT) |
4298 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
4300 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
4301 if (netif_carrier_ok(tp->dev)) {
4302 tw32(HOSTCC_STAT_COAL_TICKS,
4303 tp->coal.stats_block_coalesce_usecs);
4305 tw32(HOSTCC_STAT_COAL_TICKS, 0);
4309 if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) {
4310 u32 val = tr32(PCIE_PWR_MGMT_THRESH);
4311 if (!netif_carrier_ok(tp->dev))
4312 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
4315 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
4316 tw32(PCIE_PWR_MGMT_THRESH, val);
4322 /* This is called whenever we suspect that the system chipset is re-
4323 * ordering the sequence of MMIO to the tx send mailbox. The symptom
4324 * is bogus tx completions. We try to recover by setting the
 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
 * in the workqueue.
 */
4328 static void tg3_tx_recover(struct tg3 *tp)
4330 BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
4331 tp->write32_tx_mbox == tg3_write_indirect_mbox);
4333 printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
4334 "mapped I/O cycles to the network device, attempting to "
4335 "recover. Please report the problem to the driver maintainer "
4336 "and include system chipset information.\n", tp->dev->name);
4338 spin_lock(&tp->lock);
4339 tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
4340 spin_unlock(&tp->lock);
4343 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
4346 return tnapi->tx_pending -
4347 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
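/* Worked example, added for clarity (not in the original source): with
 * TG3_TX_RING_SIZE of 512 and tx_pending of 511, tx_prod = 5 and
 * tx_cons = 510 give (5 - 510) & 511 = 7 descriptors still in flight, so
 * tg3_tx_avail() returns 511 - 7 = 504.  The mask only works because the
 * ring size is a power of two, which lets the unsigned subtraction wrap
 * correctly.
 */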
4350 /* Tigon3 never reports partial packet sends. So we do not
4351 * need special logic to handle SKBs that have not had all
 * of their frags sent yet, like SunGEM does.
 */
4354 static void tg3_tx(struct tg3_napi *tnapi)
4356 struct tg3 *tp = tnapi->tp;
4357 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
4358 u32 sw_idx = tnapi->tx_cons;
4359 struct netdev_queue *txq;
4360 int index = tnapi - tp->napi;
4362 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
4365 txq = netdev_get_tx_queue(tp->dev, index);
4367 while (sw_idx != hw_idx) {
4368 struct ring_info *ri = &tnapi->tx_buffers[sw_idx];
4369 struct sk_buff *skb = ri->skb;
4372 if (unlikely(skb == NULL)) {
4377 pci_unmap_single(tp->pdev,
4378 pci_unmap_addr(ri, mapping),
4384 sw_idx = NEXT_TX(sw_idx);
4386 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4387 ri = &tnapi->tx_buffers[sw_idx];
4388 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
4391 pci_unmap_page(tp->pdev,
4392 pci_unmap_addr(ri, mapping),
4393 skb_shinfo(skb)->frags[i].size,
4395 sw_idx = NEXT_TX(sw_idx);
4400 if (unlikely(tx_bug)) {
4406 tnapi->tx_cons = sw_idx;
4408 /* Need to make the tx_cons update visible to tg3_start_xmit()
4409 * before checking for netif_queue_stopped(). Without the
4410 * memory barrier, there is a small possibility that tg3_start_xmit()
     * will miss it and cause the queue to be stopped forever.
     */
    smp_mb();
4415 if (unlikely(netif_tx_queue_stopped(txq) &&
4416 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
4417 __netif_tx_lock(txq, smp_processor_id());
4418 if (netif_tx_queue_stopped(txq) &&
4419 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
4420 netif_tx_wake_queue(txq);
4421 __netif_tx_unlock(txq);
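    /* Design note, added for clarity (not in the original source): the
     * queue state is tested once without the lock as a cheap filter and
     * then re-tested under __netif_tx_lock() before waking, so a
     * concurrent tg3_start_xmit() that stops the queue between the two
     * checks cannot leave it stopped while descriptors are actually
     * available, nor can it be woken spuriously.
     */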
4425 static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
4430 pci_unmap_single(tp->pdev, pci_unmap_addr(ri, mapping),
4431 map_sz, PCI_DMA_FROMDEVICE);
4432 dev_kfree_skb_any(ri->skb);
4436 /* Returns size of skb allocated or < 0 on error.
4438 * We only need to fill in the address because the other members
4439 * of the RX descriptor are invariant, see tg3_init_rings.
4441 * Note the purposeful assymetry of cpu vs. chip accesses. For
4442 * posting buffers we only dirty the first cache line of the RX
4443 * descriptor (containing the address). Whereas for the RX status
4444 * buffers the cpu only reads the last cacheline of the RX descriptor
 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
 */
4447 static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
4448 u32 opaque_key, u32 dest_idx_unmasked)
4450 struct tg3_rx_buffer_desc *desc;
4451 struct ring_info *map, *src_map;
4452 struct sk_buff *skb;
4454 int skb_size, dest_idx;
4457 switch (opaque_key) {
4458 case RXD_OPAQUE_RING_STD:
4459 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
4460 desc = &tpr->rx_std[dest_idx];
4461 map = &tpr->rx_std_buffers[dest_idx];
4462 skb_size = tp->rx_pkt_map_sz;
4465 case RXD_OPAQUE_RING_JUMBO:
4466 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
4467 desc = &tpr->rx_jmb[dest_idx].std;
4468 map = &tpr->rx_jmb_buffers[dest_idx];
4469 skb_size = TG3_RX_JMB_MAP_SZ;
4476 /* Do not overwrite any of the map or rp information
4477 * until we are sure we can commit to a new buffer.
4479 * Callers depend upon this behavior and assume that
     * we leave everything unchanged if we fail.
     */
4482 skb = netdev_alloc_skb(tp->dev, skb_size + tp->rx_offset);
4486 skb_reserve(skb, tp->rx_offset);
4488 mapping = pci_map_single(tp->pdev, skb->data, skb_size,
4489 PCI_DMA_FROMDEVICE);
4490 if (pci_dma_mapping_error(tp->pdev, mapping)) {
4496 pci_unmap_addr_set(map, mapping, mapping);
4498 desc->addr_hi = ((u64)mapping >> 32);
    desc->addr_lo = ((u64)mapping & 0xffffffff);

    return skb_size;
}
4504 /* We only need to move over in the address because the other
4505 * members of the RX descriptor are invariant. See notes above
 * tg3_alloc_rx_skb for full details.
 */
4508 static void tg3_recycle_rx(struct tg3_napi *tnapi,
4509 struct tg3_rx_prodring_set *dpr,
4510 u32 opaque_key, int src_idx,
4511 u32 dest_idx_unmasked)
4513 struct tg3 *tp = tnapi->tp;
4514 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
4515 struct ring_info *src_map, *dest_map;
4517 struct tg3_rx_prodring_set *spr = &tp->prodring[0];
4519 switch (opaque_key) {
4520 case RXD_OPAQUE_RING_STD:
4521 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
4522 dest_desc = &dpr->rx_std[dest_idx];
4523 dest_map = &dpr->rx_std_buffers[dest_idx];
4524 src_desc = &spr->rx_std[src_idx];
4525 src_map = &spr->rx_std_buffers[src_idx];
4528 case RXD_OPAQUE_RING_JUMBO:
4529 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
4530 dest_desc = &dpr->rx_jmb[dest_idx].std;
4531 dest_map = &dpr->rx_jmb_buffers[dest_idx];
4532 src_desc = &spr->rx_jmb[src_idx].std;
4533 src_map = &spr->rx_jmb_buffers[src_idx];
4540 dest_map->skb = src_map->skb;
4541 pci_unmap_addr_set(dest_map, mapping,
4542 pci_unmap_addr(src_map, mapping));
4543 dest_desc->addr_hi = src_desc->addr_hi;
4544 dest_desc->addr_lo = src_desc->addr_lo;
4545 src_map->skb = NULL;
4548 /* The RX ring scheme is composed of multiple rings which post fresh
4549 * buffers to the chip, and one special ring the chip uses to report
4550 * status back to the host.
4552 * The special ring reports the status of received packets to the
4553 * host. The chip does not write into the original descriptor the
4554 * RX buffer was obtained from. The chip simply takes the original
4555 * descriptor as provided by the host, updates the status and length
4556 * field, then writes this into the next status ring entry.
4558 * Each ring the host uses to post buffers to the chip is described
4559 * by a TG3_BDINFO entry in the chip's SRAM area. When a packet arrives,
4560 * it is first placed into the on-chip RAM. Once the packet's length
4561 * is known, the chip walks down the TG3_BDINFO entries to select the ring:
4562 * each TG3_BDINFO specifies a MAXLEN field, and the first TG3_BDINFO
4563 * whose MAXLEN covers the new packet's length is chosen.
4565 * The "separate ring for rx status" scheme may sound queer, but it makes
4566 * sense from a cache coherency perspective. If only the host writes
4567 * to the buffer post rings, and only the chip writes to the rx status
4568 * rings, then cache lines never move beyond shared-modified state.
4569 * If both the host and chip were to write into the same ring, cache line
4570 * eviction could occur since both entities want it in an exclusive state.
4572 static int tg3_rx(struct tg3_napi *tnapi, int budget)
4574 struct tg3 *tp = tnapi->tp;
4575 u32 work_mask, rx_std_posted = 0;
4576 u32 std_prod_idx, jmb_prod_idx;
4577 u32 sw_idx = tnapi->rx_rcb_ptr;
4580 struct tg3_rx_prodring_set *tpr = tnapi->prodring;
4582 hw_idx = *(tnapi->rx_rcb_prod_idx);
4584 * We need to order the read of hw_idx and the read of
4585 * the opaque cookie.
4590 std_prod_idx = tpr->rx_std_prod_idx;
4591 jmb_prod_idx = tpr->rx_jmb_prod_idx;
4592 while (sw_idx != hw_idx && budget > 0) {
4593 struct ring_info *ri;
4594 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
4596 struct sk_buff *skb;
4597 dma_addr_t dma_addr;
4598 u32 opaque_key, desc_idx, *post_ptr;
4600 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
4601 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
4602 if (opaque_key == RXD_OPAQUE_RING_STD) {
4603 ri = &tp->prodring[0].rx_std_buffers[desc_idx];
4604 dma_addr = pci_unmap_addr(ri, mapping);
4606 post_ptr = &std_prod_idx;
4608 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
4609 ri = &tp->prodring[0].rx_jmb_buffers[desc_idx];
4610 dma_addr = pci_unmap_addr(ri, mapping);
4612 post_ptr = &jmb_prod_idx;
4614 goto next_pkt_nopost;
4616 work_mask |= opaque_key;
4618 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
4619 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
4621 tg3_recycle_rx(tnapi, tpr, opaque_key,
4622 desc_idx, *post_ptr);
4624 /* Other statistics are kept track of by the card. */
4625 tp->net_stats.rx_dropped++;
4629 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
4632 if (len > RX_COPY_THRESHOLD &&
4633 tp->rx_offset == NET_IP_ALIGN) {
4634 /* rx_offset will likely not equal NET_IP_ALIGN
4635 * if this is a 5701 card running in PCI-X mode
4636 * [see tg3_get_invariants()]
4640 skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key,
4647 pci_unmap_single(tp->pdev, dma_addr, skb_size,
4648 PCI_DMA_FROMDEVICE);
4652 struct sk_buff *copy_skb;
4654 tg3_recycle_rx(tnapi, tpr, opaque_key,
4655 desc_idx, *post_ptr);
4657 copy_skb = netdev_alloc_skb(tp->dev,
4658 len + TG3_RAW_IP_ALIGN);
4659 if (copy_skb == NULL)
4660 goto drop_it_no_recycle;
4662 skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
4663 skb_put(copy_skb, len);
4664 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4665 skb_copy_from_linear_data(skb, copy_skb->data, len);
4666 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4668 /* We'll reuse the original ring buffer. */
4672 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
4673 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
4674 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
4675 >> RXD_TCPCSUM_SHIFT) == 0xffff))
4676 skb->ip_summed = CHECKSUM_UNNECESSARY;
4678 skb->ip_summed = CHECKSUM_NONE;
4680 skb->protocol = eth_type_trans(skb, tp->dev);
4682 if (len > (tp->dev->mtu + ETH_HLEN) &&
4683 skb->protocol != htons(ETH_P_8021Q)) {
4688 #if TG3_VLAN_TAG_USED
4689 if (tp->vlgrp != NULL &&
4690 desc->type_flags & RXD_FLAG_VLAN) {
4691 vlan_gro_receive(&tnapi->napi, tp->vlgrp,
4692 desc->err_vlan & RXD_VLAN_MASK, skb);
4695 napi_gro_receive(&tnapi->napi, skb);
4703 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
4704 u32 idx = *post_ptr % TG3_RX_RING_SIZE;
4705 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, idx);
4706 work_mask &= ~RXD_OPAQUE_RING_STD;
4711 sw_idx &= (TG3_RX_RCB_RING_SIZE(tp) - 1);
4713 /* Refresh hw_idx to see if there is new work */
4714 if (sw_idx == hw_idx) {
4715 hw_idx = *(tnapi->rx_rcb_prod_idx);
4720 /* ACK the status ring. */
4721 tnapi->rx_rcb_ptr = sw_idx;
4722 tw32_rx_mbox(tnapi->consmbox, sw_idx);
4724 /* Refill RX ring(s). */
4725 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS) || tnapi == &tp->napi[1]) {
4726 if (work_mask & RXD_OPAQUE_RING_STD) {
4727 tpr->rx_std_prod_idx = std_prod_idx % TG3_RX_RING_SIZE;
4728 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
4729 tpr->rx_std_prod_idx);
4731 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
4732 tpr->rx_jmb_prod_idx = jmb_prod_idx %
4733 TG3_RX_JUMBO_RING_SIZE;
4734 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
4735 tpr->rx_jmb_prod_idx);
4738 } else if (work_mask) {
4739 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
4740 * updated before the producer indices can be updated.
4744 tpr->rx_std_prod_idx = std_prod_idx % TG3_RX_RING_SIZE;
4745 tpr->rx_jmb_prod_idx = jmb_prod_idx % TG3_RX_JUMBO_RING_SIZE;
4747 napi_schedule(&tp->napi[1].napi);
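/* Illustration only: the loop above recovers which producer ring a completed
 * buffer came from, and its slot within that ring, from the opaque cookie the
 * driver stored in the descriptor when it was posted (see
 * tg3_rx_prodring_alloc()).  A minimal sketch of that decode, reusing the
 * driver's own masks; the helper name is hypothetical.
 */
static inline void example_decode_opaque(u32 opaque, u32 *ring, u32 *idx)
{
        *ring = opaque & RXD_OPAQUE_RING_MASK;  /* e.g. RXD_OPAQUE_RING_STD */
        *idx  = opaque & RXD_OPAQUE_INDEX_MASK; /* slot within that ring */
}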
4753 static void tg3_poll_link(struct tg3 *tp)
4755 /* handle link change and other phy events */
4756 if (!(tp->tg3_flags &
4757 (TG3_FLAG_USE_LINKCHG_REG |
4758 TG3_FLAG_POLL_SERDES))) {
4759 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
4761 if (sblk->status & SD_STATUS_LINK_CHG) {
4762 sblk->status = SD_STATUS_UPDATED |
4763 (sblk->status & ~SD_STATUS_LINK_CHG);
4764 spin_lock(&tp->lock);
4765 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
4767 (MAC_STATUS_SYNC_CHANGED |
4768 MAC_STATUS_CFG_CHANGED |
4769 MAC_STATUS_MI_COMPLETION |
4770 MAC_STATUS_LNKSTATE_CHANGED));
4773 tg3_setup_phy(tp, 0);
4774 spin_unlock(&tp->lock);
4779 static void tg3_rx_prodring_xfer(struct tg3 *tp,
4780 struct tg3_rx_prodring_set *dpr,
4781 struct tg3_rx_prodring_set *spr)
4783 u32 si, di, cpycnt, src_prod_idx;
4787 src_prod_idx = spr->rx_std_prod_idx;
4789 /* Make sure updates to the rx_std_buffers[] entries and the
4790 * standard producer index are seen in the correct order.
4794 if (spr->rx_std_cons_idx == src_prod_idx)
4797 if (spr->rx_std_cons_idx < src_prod_idx)
4798 cpycnt = src_prod_idx - spr->rx_std_cons_idx;
4800 cpycnt = TG3_RX_RING_SIZE - spr->rx_std_cons_idx;
4802 cpycnt = min(cpycnt, TG3_RX_RING_SIZE - dpr->rx_std_prod_idx);
4804 si = spr->rx_std_cons_idx;
4805 di = dpr->rx_std_prod_idx;
4807 memcpy(&dpr->rx_std_buffers[di],
4808 &spr->rx_std_buffers[si],
4809 cpycnt * sizeof(struct ring_info));
4811 for (i = 0; i < cpycnt; i++, di++, si++) {
4812 struct tg3_rx_buffer_desc *sbd, *dbd;
4813 sbd = &spr->rx_std[si];
4814 dbd = &dpr->rx_std[di];
4815 dbd->addr_hi = sbd->addr_hi;
4816 dbd->addr_lo = sbd->addr_lo;
4819 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) %
4821 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) %
4826 src_prod_idx = spr->rx_jmb_prod_idx;
4828 /* Make sure updates to the rx_jmb_buffers[] entries and
4829 * the jumbo producer index are seen in the correct order.
4833 if (spr->rx_jmb_cons_idx == src_prod_idx)
4836 if (spr->rx_jmb_cons_idx < src_prod_idx)
4837 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
4839 cpycnt = TG3_RX_JUMBO_RING_SIZE - spr->rx_jmb_cons_idx;
4841 cpycnt = min(cpycnt,
4842 TG3_RX_JUMBO_RING_SIZE - dpr->rx_jmb_prod_idx);
4844 si = spr->rx_jmb_cons_idx;
4845 di = dpr->rx_jmb_prod_idx;
4847 memcpy(&dpr->rx_jmb_buffers[di],
4848 &spr->rx_jmb_buffers[si],
4849 cpycnt * sizeof(struct ring_info));
4851 for (i = 0; i < cpycnt; i++, di++, si++) {
4852 struct tg3_rx_buffer_desc *sbd, *dbd;
4853 sbd = &spr->rx_jmb[si].std;
4854 dbd = &dpr->rx_jmb[di].std;
4855 dbd->addr_hi = sbd->addr_hi;
4856 dbd->addr_lo = sbd->addr_lo;
4859 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) %
4860 TG3_RX_JUMBO_RING_SIZE;
4861 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) %
4862 TG3_RX_JUMBO_RING_SIZE;
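/* Sketch (illustration only) of the copy-count computation used above, shown
 * for the standard ring.  When the source producer has wrapped behind the
 * consumer, only the run up to the end of the ring is copied on this pass;
 * the rest is picked up on a later call.  The count is then clamped so the
 * destination producer never runs past the end of its ring either.  The
 * helper name is hypothetical.
 */
static inline u32 example_xfer_count(u32 src_cons, u32 src_prod, u32 dst_prod)
{
        u32 cpycnt;

        if (src_cons == src_prod)
                return 0;                               /* nothing to transfer */

        if (src_cons < src_prod)
                cpycnt = src_prod - src_cons;           /* contiguous run */
        else
                cpycnt = TG3_RX_RING_SIZE - src_cons;   /* run up to ring end */

        return min(cpycnt, TG3_RX_RING_SIZE - dst_prod);
}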
4866 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
4868 struct tg3 *tp = tnapi->tp;
4870 /* run TX completion thread */
4871 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
4873 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
4877 /* run RX thread, within the bounds set by NAPI.
4878 * All RX "locking" is done by ensuring outside
4879 * code synchronizes with tg3->napi.poll()
4881 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
4882 work_done += tg3_rx(tnapi, budget - work_done);
4884 if ((tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS) && tnapi == &tp->napi[1]) {
4886 u32 std_prod_idx = tp->prodring[0].rx_std_prod_idx;
4887 u32 jmb_prod_idx = tp->prodring[0].rx_jmb_prod_idx;
4889 for (i = 2; i < tp->irq_cnt; i++)
4890 tg3_rx_prodring_xfer(tp, tnapi->prodring,
4891 tp->napi[i].prodring);
4895 if (std_prod_idx != tp->prodring[0].rx_std_prod_idx) {
4896 u32 mbox = TG3_RX_STD_PROD_IDX_REG;
4897 tw32_rx_mbox(mbox, tp->prodring[0].rx_std_prod_idx);
4900 if (jmb_prod_idx != tp->prodring[0].rx_jmb_prod_idx) {
4901 u32 mbox = TG3_RX_JMB_PROD_IDX_REG;
4902 tw32_rx_mbox(mbox, tp->prodring[0].rx_jmb_prod_idx);
4911 static int tg3_poll_msix(struct napi_struct *napi, int budget)
4913 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
4914 struct tg3 *tp = tnapi->tp;
4916 struct tg3_hw_status *sblk = tnapi->hw_status;
4919 work_done = tg3_poll_work(tnapi, work_done, budget);
4921 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
4924 if (unlikely(work_done >= budget))
4927 /* tp->last_tag is used in tg3_restart_ints() below
4928 * to tell the hw how much work has been processed,
4929 * so we must read it before checking for more work.
4931 tnapi->last_tag = sblk->status_tag;
4932 tnapi->last_irq_tag = tnapi->last_tag;
4935 /* check for RX/TX work to do */
4936 if (sblk->idx[0].tx_consumer == tnapi->tx_cons &&
4937 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr) {
4938 napi_complete(napi);
4939 /* Reenable interrupts. */
4940 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
4949 /* work_done is guaranteed to be less than budget. */
4950 napi_complete(napi);
4951 schedule_work(&tp->reset_task);
4955 static int tg3_poll(struct napi_struct *napi, int budget)
4957 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
4958 struct tg3 *tp = tnapi->tp;
4960 struct tg3_hw_status *sblk = tnapi->hw_status;
4965 work_done = tg3_poll_work(tnapi, work_done, budget);
4967 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
4970 if (unlikely(work_done >= budget))
4973 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
4974 /* tp->last_tag is used in tg3_int_reenable() below
4975 * to tell the hw how much work has been processed,
4976 * so we must read it before checking for more work.
4978 tnapi->last_tag = sblk->status_tag;
4979 tnapi->last_irq_tag = tnapi->last_tag;
4982 sblk->status &= ~SD_STATUS_UPDATED;
4984 if (likely(!tg3_has_work(tnapi))) {
4985 napi_complete(napi);
4986 tg3_int_reenable(tnapi);
4994 /* work_done is guaranteed to be less than budget. */
4995 napi_complete(napi);
4996 schedule_work(&tp->reset_task);
5000 static void tg3_irq_quiesce(struct tg3 *tp)
5004 BUG_ON(tp->irq_sync);
5009 for (i = 0; i < tp->irq_cnt; i++)
5010 synchronize_irq(tp->napi[i].irq_vec);
5013 static inline int tg3_irq_sync(struct tg3 *tp)
5015 return tp->irq_sync;
5018 /* Fully shutdown all tg3 driver activity elsewhere in the system.
5019 * If irq_sync is non-zero, then the IRQ handler must be synchronized
5020 * with as well. Most of the time, this is not necessary except when
5021 * shutting down the device.
5023 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
5025 spin_lock_bh(&tp->lock);
5027 tg3_irq_quiesce(tp);
5030 static inline void tg3_full_unlock(struct tg3 *tp)
5032 spin_unlock_bh(&tp->lock);
5035 /* One-shot MSI handler - the chip automatically disables the interrupt
5036 * after sending the MSI, so the driver doesn't have to do it.
5038 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
5040 struct tg3_napi *tnapi = dev_id;
5041 struct tg3 *tp = tnapi->tp;
5043 prefetch(tnapi->hw_status);
5045 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5047 if (likely(!tg3_irq_sync(tp)))
5048 napi_schedule(&tnapi->napi);
5053 /* MSI ISR - No need to check for interrupt sharing and no need to
5054 * flush status block and interrupt mailbox. PCI ordering rules
5055 * guarantee that MSI will arrive after the status block.
5057 static irqreturn_t tg3_msi(int irq, void *dev_id)
5059 struct tg3_napi *tnapi = dev_id;
5060 struct tg3 *tp = tnapi->tp;
5062 prefetch(tnapi->hw_status);
5064 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5066 * Writing any value to intr-mbox-0 clears PCI INTA# and
5067 * chip-internal interrupt pending events.
5068 * Writing non-zero to intr-mbox-0 additionally tells the
5069 * NIC to stop sending us irqs, engaging "in-intr-handler"
5072 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5073 if (likely(!tg3_irq_sync(tp)))
5074 napi_schedule(&tnapi->napi);
5076 return IRQ_RETVAL(1);
5079 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
5081 struct tg3_napi *tnapi = dev_id;
5082 struct tg3 *tp = tnapi->tp;
5083 struct tg3_hw_status *sblk = tnapi->hw_status;
5084 unsigned int handled = 1;
5086 /* In INTx mode, it is possible for the interrupt to arrive at
5087 * the CPU before the status block posted prior to the interrupt lands.
5088 * Reading the PCI State register will confirm whether the
5089 * interrupt is ours and will flush the status block.
5091 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
5092 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
5093 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5100 * Writing any value to intr-mbox-0 clears PCI INTA# and
5101 * chip-internal interrupt pending events.
5102 * Writing non-zero to intr-mbox-0 additionally tells the
5103 * NIC to stop sending us irqs, engaging "in-intr-handler"
5106 * Flush the mailbox to de-assert the IRQ immediately to prevent
5107 * spurious interrupts. The flush impacts performance but
5108 * excessive spurious interrupts can be worse in some cases.
5110 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5111 if (tg3_irq_sync(tp))
5113 sblk->status &= ~SD_STATUS_UPDATED;
5114 if (likely(tg3_has_work(tnapi))) {
5115 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5116 napi_schedule(&tnapi->napi);
5118 /* No work, shared interrupt perhaps? re-enable
5119 * interrupts, and flush that PCI write
5121 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
5125 return IRQ_RETVAL(handled);
5128 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
5130 struct tg3_napi *tnapi = dev_id;
5131 struct tg3 *tp = tnapi->tp;
5132 struct tg3_hw_status *sblk = tnapi->hw_status;
5133 unsigned int handled = 1;
5135 /* In INTx mode, it is possible for the interrupt to arrive at
5136 * the CPU before the status block posted prior to the interrupt lands.
5137 * Reading the PCI State register will confirm whether the
5138 * interrupt is ours and will flush the status block.
5140 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
5141 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
5142 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5149 * Writing any value to intr-mbox-0 clears PCI INTA# and
5150 * chip-internal interrupt pending events.
5151 * Writing non-zero to intr-mbox-0 additionally tells the
5152 * NIC to stop sending us irqs, engaging "in-intr-handler"
5155 * Flush the mailbox to de-assert the IRQ immediately to prevent
5156 * spurious interrupts. The flush impacts performance but
5157 * excessive spurious interrupts can be worse in some cases.
5159 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5162 * In a shared interrupt configuration, sometimes other devices'
5163 * interrupts will scream. We record the current status tag here
5164 * so that the above check can report that the screaming interrupts
5165 * are unhandled. Eventually they will be silenced.
5167 tnapi->last_irq_tag = sblk->status_tag;
5169 if (tg3_irq_sync(tp))
5172 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5174 napi_schedule(&tnapi->napi);
5177 return IRQ_RETVAL(handled);
5180 /* ISR for interrupt test */
5181 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
5183 struct tg3_napi *tnapi = dev_id;
5184 struct tg3 *tp = tnapi->tp;
5185 struct tg3_hw_status *sblk = tnapi->hw_status;
5187 if ((sblk->status & SD_STATUS_UPDATED) ||
5188 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5189 tg3_disable_ints(tp);
5190 return IRQ_RETVAL(1);
5192 return IRQ_RETVAL(0);
5195 static int tg3_init_hw(struct tg3 *, int);
5196 static int tg3_halt(struct tg3 *, int, int);
5198 /* Restart hardware after configuration changes, self-test, etc.
5199 * Invoked with tp->lock held.
5201 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
5202 __releases(tp->lock)
5203 __acquires(tp->lock)
5207 err = tg3_init_hw(tp, reset_phy);
5209 printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
5210 "aborting.\n", tp->dev->name);
5211 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
5212 tg3_full_unlock(tp);
5213 del_timer_sync(&tp->timer);
5215 tg3_napi_enable(tp);
5217 tg3_full_lock(tp, 0);
5222 #ifdef CONFIG_NET_POLL_CONTROLLER
5223 static void tg3_poll_controller(struct net_device *dev)
5226 struct tg3 *tp = netdev_priv(dev);
5228 for (i = 0; i < tp->irq_cnt; i++)
5229 tg3_interrupt(tp->napi[i].irq_vec, dev);
5233 static void tg3_reset_task(struct work_struct *work)
5235 struct tg3 *tp = container_of(work, struct tg3, reset_task);
5237 unsigned int restart_timer;
5239 tg3_full_lock(tp, 0);
5241 if (!netif_running(tp->dev)) {
5242 tg3_full_unlock(tp);
5246 tg3_full_unlock(tp);
5252 tg3_full_lock(tp, 1);
5254 restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
5255 tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
5257 if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
5258 tp->write32_tx_mbox = tg3_write32_tx_mbox;
5259 tp->write32_rx_mbox = tg3_write_flush_reg32;
5260 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
5261 tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
5264 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
5265 err = tg3_init_hw(tp, 1);
5269 tg3_netif_start(tp);
5272 mod_timer(&tp->timer, jiffies + 1);
5275 tg3_full_unlock(tp);
5281 static void tg3_dump_short_state(struct tg3 *tp)
5283 printk(KERN_ERR PFX "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n",
5284 tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS));
5285 printk(KERN_ERR PFX "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n",
5286 tr32(RDMAC_STATUS), tr32(WDMAC_STATUS));
5289 static void tg3_tx_timeout(struct net_device *dev)
5291 struct tg3 *tp = netdev_priv(dev);
5293 if (netif_msg_tx_err(tp)) {
5294 printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
5296 tg3_dump_short_state(tp);
5299 schedule_work(&tp->reset_task);
5302 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
5303 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
5305 u32 base = (u32) mapping & 0xffffffff;
5307 return ((base > 0xffffdcc0) &&
5308 (base + len + 8 < base));
5311 /* Test for DMA addresses > 40-bit */
5312 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
5315 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
5316 if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
5317 return (((u64) mapping + len) > DMA_BIT_MASK(40));
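/* Worked example (illustration only, hypothetical numbers) for the two tests
 * above: a 100-byte buffer mapped at DMA address 0xfffffff0 gives
 * base = 0xfffffff0 > 0xffffdcc0 and base + len + 8 = 0x5c, which wraps
 * below base, so the buffer straddles a 4GB boundary and needs the
 * workaround.  Likewise a mapping at 0xfffffffff0 (just under 1ULL << 40)
 * plus a 100-byte length exceeds DMA_BIT_MASK(40) and trips the 40-bit test
 * on chips with TG3_FLAG_40BIT_DMA_BUG set.
 */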
5324 static void tg3_set_txd(struct tg3_napi *, int, dma_addr_t, int, u32, u32);
5326 /* Work around 4GB and 40-bit hardware DMA bugs. */
5327 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
5328 struct sk_buff *skb, u32 last_plus_one,
5329 u32 *start, u32 base_flags, u32 mss)
5331 struct tg3 *tp = tnapi->tp;
5332 struct sk_buff *new_skb;
5333 dma_addr_t new_addr = 0;
5337 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
5338 new_skb = skb_copy(skb, GFP_ATOMIC);
5340 int more_headroom = 4 - ((unsigned long)skb->data & 3);
5342 new_skb = skb_copy_expand(skb,
5343 skb_headroom(skb) + more_headroom,
5344 skb_tailroom(skb), GFP_ATOMIC);
5350 /* New SKB is guaranteed to be linear. */
5352 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
5354 /* Make sure the mapping succeeded */
5355 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
5357 dev_kfree_skb(new_skb);
5360 /* Make sure new skb does not cross any 4G boundaries.
5361 * Drop the packet if it does.
5363 } else if ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) &&
5364 tg3_4g_overflow_test(new_addr, new_skb->len)) {
5365 pci_unmap_single(tp->pdev, new_addr, new_skb->len,
5368 dev_kfree_skb(new_skb);
5371 tg3_set_txd(tnapi, entry, new_addr, new_skb->len,
5372 base_flags, 1 | (mss << 1));
5373 *start = NEXT_TX(entry);
5377 /* Now clean up the sw ring entries. */
5379 while (entry != last_plus_one) {
5383 len = skb_headlen(skb);
5385 len = skb_shinfo(skb)->frags[i-1].size;
5387 pci_unmap_single(tp->pdev,
5388 pci_unmap_addr(&tnapi->tx_buffers[entry],
5390 len, PCI_DMA_TODEVICE);
5392 tnapi->tx_buffers[entry].skb = new_skb;
5393 pci_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
5396 tnapi->tx_buffers[entry].skb = NULL;
5398 entry = NEXT_TX(entry);
5407 static void tg3_set_txd(struct tg3_napi *tnapi, int entry,
5408 dma_addr_t mapping, int len, u32 flags,
5411 struct tg3_tx_buffer_desc *txd = &tnapi->tx_ring[entry];
5412 int is_end = (mss_and_is_end & 0x1);
5413 u32 mss = (mss_and_is_end >> 1);
5417 flags |= TXD_FLAG_END;
5418 if (flags & TXD_FLAG_VLAN) {
5419 vlan_tag = flags >> 16;
5422 vlan_tag |= (mss << TXD_MSS_SHIFT);
5424 txd->addr_hi = ((u64) mapping >> 32);
5425 txd->addr_lo = ((u64) mapping & 0xffffffff);
5426 txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
5427 txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
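/* Illustration only: callers of tg3_set_txd() pack the "last fragment" flag
 * and the TSO MSS into the single mss_and_is_end argument, bit 0 and bits
 * 1..31 respectively, which the function above unpacks again.  The helper
 * name is hypothetical.
 */
static inline u32 example_mss_and_is_end(u32 mss, int is_last_frag)
{
        return (is_last_frag ? 1 : 0) | (mss << 1);
}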
5430 /* hard_start_xmit for devices that don't have any bugs and
5431 * support TG3_FLG2_HW_TSO_2 and TG3_FLG2_HW_TSO_3 only.
5433 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
5434 struct net_device *dev)
5436 struct tg3 *tp = netdev_priv(dev);
5437 u32 len, entry, base_flags, mss;
5439 struct tg3_napi *tnapi;
5440 struct netdev_queue *txq;
5441 unsigned int i, last;
5444 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
5445 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
5446 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
5449 /* We are running in BH disabled context with netif_tx_lock
5450 * and TX reclaim runs via tp->napi.poll inside of a software
5451 * interrupt. Furthermore, IRQ processing runs lockless so we have
5452 * no IRQ context deadlocks to worry about either. Rejoice!
5454 if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) {
5455 if (!netif_tx_queue_stopped(txq)) {
5456 netif_tx_stop_queue(txq);
5458 /* This is a hard error, log it. */
5459 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
5460 "queue awake!\n", dev->name);
5462 return NETDEV_TX_BUSY;
5465 entry = tnapi->tx_prod;
5468 if ((mss = skb_shinfo(skb)->gso_size) != 0) {
5469 int tcp_opt_len, ip_tcp_len;
5472 if (skb_header_cloned(skb) &&
5473 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
5478 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
5479 hdrlen = skb_headlen(skb) - ETH_HLEN;
5481 struct iphdr *iph = ip_hdr(skb);
5483 tcp_opt_len = tcp_optlen(skb);
5484 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
5487 iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
5488 hdrlen = ip_tcp_len + tcp_opt_len;
5491 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) {
5492 mss |= (hdrlen & 0xc) << 12;
5494 base_flags |= 0x00000010;
5495 base_flags |= (hdrlen & 0x3e0) << 5;
5499 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
5500 TXD_FLAG_CPU_POST_DMA);
5502 tcp_hdr(skb)->check = 0;
5505 else if (skb->ip_summed == CHECKSUM_PARTIAL)
5506 base_flags |= TXD_FLAG_TCPUDP_CSUM;
5507 #if TG3_VLAN_TAG_USED
5508 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
5509 base_flags |= (TXD_FLAG_VLAN |
5510 (vlan_tx_tag_get(skb) << 16));
5513 len = skb_headlen(skb);
5515 /* Queue skb data, a.k.a. the main skb fragment. */
5516 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
5517 if (pci_dma_mapping_error(tp->pdev, mapping)) {
5522 tnapi->tx_buffers[entry].skb = skb;
5523 pci_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
5525 if ((tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) &&
5526 !mss && skb->len > ETH_DATA_LEN)
5527 base_flags |= TXD_FLAG_JMB_PKT;
5529 tg3_set_txd(tnapi, entry, mapping, len, base_flags,
5530 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
5532 entry = NEXT_TX(entry);
5534 /* Now loop through additional data fragments, and queue them. */
5535 if (skb_shinfo(skb)->nr_frags > 0) {
5536 last = skb_shinfo(skb)->nr_frags - 1;
5537 for (i = 0; i <= last; i++) {
5538 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5541 mapping = pci_map_page(tp->pdev,
5544 len, PCI_DMA_TODEVICE);
5545 if (pci_dma_mapping_error(tp->pdev, mapping))
5548 tnapi->tx_buffers[entry].skb = NULL;
5549 pci_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
5552 tg3_set_txd(tnapi, entry, mapping, len,
5553 base_flags, (i == last) | (mss << 1));
5555 entry = NEXT_TX(entry);
5559 /* Packets are ready, update Tx producer idx, both locally and on the card. */
5560 tw32_tx_mbox(tnapi->prodmbox, entry);
5562 tnapi->tx_prod = entry;
5563 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
5564 netif_tx_stop_queue(txq);
5565 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
5566 netif_tx_wake_queue(txq);
5572 return NETDEV_TX_OK;
5576 entry = tnapi->tx_prod;
5577 tnapi->tx_buffers[entry].skb = NULL;
5578 pci_unmap_single(tp->pdev,
5579 pci_unmap_addr(&tnapi->tx_buffers[entry], mapping),
5582 for (i = 0; i <= last; i++) {
5583 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5584 entry = NEXT_TX(entry);
5586 pci_unmap_page(tp->pdev,
5587 pci_unmap_addr(&tnapi->tx_buffers[entry],
5589 frag->size, PCI_DMA_TODEVICE);
5593 return NETDEV_TX_OK;
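/* Illustration only: on the HW_TSO_3 path above, the TSO header length is
 * scattered across spare bits of the mss word and base_flags rather than
 * stored as one field.  A sketch of that bit-splitting, matching the shifts
 * used above and assuming (as the driver's handling suggests) that
 * header-length bit 4 is carried in base_flags bit 4.  Hypothetical helper;
 * not called by the driver.
 */
static void example_encode_tso3_hdrlen(u32 hdrlen, u32 *mss, u32 *base_flags)
{
        *mss |= (hdrlen & 0xc) << 12;           /* header length bits 2..3 */
        if (hdrlen & 0x10)
                *base_flags |= 0x00000010;      /* header length bit 4 */
        *base_flags |= (hdrlen & 0x3e0) << 5;   /* header length bits 5..9 */
}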
5596 static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *,
5597 struct net_device *);
5599 /* Use GSO to work around a rare TSO bug that may be triggered when the
5600 * TSO header is greater than 80 bytes.
5602 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
5604 struct sk_buff *segs, *nskb;
5605 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
5607 /* Estimate the number of fragments in the worst case */
5608 if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
5609 netif_stop_queue(tp->dev);
5610 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
5611 return NETDEV_TX_BUSY;
5613 netif_wake_queue(tp->dev);
5616 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
5618 goto tg3_tso_bug_end;
5624 tg3_start_xmit_dma_bug(nskb, tp->dev);
5630 return NETDEV_TX_OK;
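/* Illustration only: the elided loop above walks the list returned by
 * skb_gso_segment() and hands each resulting segment to the normal transmit
 * path.  A minimal sketch of that pattern (not the driver's literal code):
 */
static void example_xmit_gso_segments(struct tg3 *tp, struct sk_buff *segs)
{
        struct sk_buff *nskb;

        while (segs) {
                nskb = segs;
                segs = segs->next;
                nskb->next = NULL;              /* detach from the list */
                tg3_start_xmit_dma_bug(nskb, tp->dev);
        }
}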
5633 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
5634 * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
5636 static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
5637 struct net_device *dev)
5639 struct tg3 *tp = netdev_priv(dev);
5640 u32 len, entry, base_flags, mss;
5641 int would_hit_hwbug;
5643 struct tg3_napi *tnapi;
5644 struct netdev_queue *txq;
5645 unsigned int i, last;
5648 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
5649 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
5650 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
5653 /* We are running in BH disabled context with netif_tx_lock
5654 * and TX reclaim runs via tp->napi.poll inside of a software
5655 * interrupt. Furthermore, IRQ processing runs lockless so we have
5656 * no IRQ context deadlocks to worry about either. Rejoice!
5658 if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) {
5659 if (!netif_tx_queue_stopped(txq)) {
5660 netif_tx_stop_queue(txq);
5662 /* This is a hard error, log it. */
5663 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
5664 "queue awake!\n", dev->name);
5666 return NETDEV_TX_BUSY;
5669 entry = tnapi->tx_prod;
5671 if (skb->ip_summed == CHECKSUM_PARTIAL)
5672 base_flags |= TXD_FLAG_TCPUDP_CSUM;
5674 if ((mss = skb_shinfo(skb)->gso_size) != 0) {
5676 u32 tcp_opt_len, ip_tcp_len, hdr_len;
5678 if (skb_header_cloned(skb) &&
5679 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
5684 tcp_opt_len = tcp_optlen(skb);
5685 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
5687 hdr_len = ip_tcp_len + tcp_opt_len;
5688 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
5689 (tp->tg3_flags2 & TG3_FLG2_TSO_BUG))
5690 return (tg3_tso_bug(tp, skb));
5692 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
5693 TXD_FLAG_CPU_POST_DMA);
5697 iph->tot_len = htons(mss + hdr_len);
5698 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
5699 tcp_hdr(skb)->check = 0;
5700 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
5702 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
5707 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) {
5708 mss |= (hdr_len & 0xc) << 12;
5710 base_flags |= 0x00000010;
5711 base_flags |= (hdr_len & 0x3e0) << 5;
5712 } else if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2)
5713 mss |= hdr_len << 9;
5714 else if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_1) ||
5715 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
5716 if (tcp_opt_len || iph->ihl > 5) {
5719 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
5720 mss |= (tsflags << 11);
5723 if (tcp_opt_len || iph->ihl > 5) {
5726 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
5727 base_flags |= tsflags << 12;
5731 #if TG3_VLAN_TAG_USED
5732 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
5733 base_flags |= (TXD_FLAG_VLAN |
5734 (vlan_tx_tag_get(skb) << 16));
5737 if ((tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) &&
5738 !mss && skb->len > ETH_DATA_LEN)
5739 base_flags |= TXD_FLAG_JMB_PKT;
5741 len = skb_headlen(skb);
5743 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
5744 if (pci_dma_mapping_error(tp->pdev, mapping)) {
5749 tnapi->tx_buffers[entry].skb = skb;
5750 pci_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
5752 would_hit_hwbug = 0;
5754 if ((tp->tg3_flags3 & TG3_FLG3_SHORT_DMA_BUG) && len <= 8)
5755 would_hit_hwbug = 1;
5757 if ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) &&
5758 tg3_4g_overflow_test(mapping, len))
5759 would_hit_hwbug = 1;
5761 if ((tp->tg3_flags3 & TG3_FLG3_40BIT_DMA_LIMIT_BUG) &&
5762 tg3_40bit_overflow_test(tp, mapping, len))
5763 would_hit_hwbug = 1;
5765 if (tp->tg3_flags3 & TG3_FLG3_5701_DMA_BUG)
5766 would_hit_hwbug = 1;
5768 tg3_set_txd(tnapi, entry, mapping, len, base_flags,
5769 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
5771 entry = NEXT_TX(entry);
5773 /* Now loop through additional data fragments, and queue them. */
5774 if (skb_shinfo(skb)->nr_frags > 0) {
5775 last = skb_shinfo(skb)->nr_frags - 1;
5776 for (i = 0; i <= last; i++) {
5777 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5780 mapping = pci_map_page(tp->pdev,
5783 len, PCI_DMA_TODEVICE);
5785 tnapi->tx_buffers[entry].skb = NULL;
5786 pci_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
5788 if (pci_dma_mapping_error(tp->pdev, mapping))
5791 if ((tp->tg3_flags3 & TG3_FLG3_SHORT_DMA_BUG) &&
5793 would_hit_hwbug = 1;
5795 if ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) &&
5796 tg3_4g_overflow_test(mapping, len))
5797 would_hit_hwbug = 1;
5799 if ((tp->tg3_flags3 & TG3_FLG3_40BIT_DMA_LIMIT_BUG) &&
5800 tg3_40bit_overflow_test(tp, mapping, len))
5801 would_hit_hwbug = 1;
5803 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5804 tg3_set_txd(tnapi, entry, mapping, len,
5805 base_flags, (i == last)|(mss << 1));
5807 tg3_set_txd(tnapi, entry, mapping, len,
5808 base_flags, (i == last));
5810 entry = NEXT_TX(entry);
5814 if (would_hit_hwbug) {
5815 u32 last_plus_one = entry;
5818 start = entry - 1 - skb_shinfo(skb)->nr_frags;
5819 start &= (TG3_TX_RING_SIZE - 1);
5821 /* If the workaround fails due to memory/mapping
5822 * failure, silently drop this packet.
5824 if (tigon3_dma_hwbug_workaround(tnapi, skb, last_plus_one,
5825 &start, base_flags, mss))
5831 /* Packets are ready, update Tx producer idx, both locally and on the card. */
5832 tw32_tx_mbox(tnapi->prodmbox, entry);
5834 tnapi->tx_prod = entry;
5835 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
5836 netif_tx_stop_queue(txq);
5837 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
5838 netif_tx_wake_queue(txq);
5844 return NETDEV_TX_OK;
5848 entry = tnapi->tx_prod;
5849 tnapi->tx_buffers[entry].skb = NULL;
5850 pci_unmap_single(tp->pdev,
5851 pci_unmap_addr(&tnapi->tx_buffers[entry], mapping),
5854 for (i = 0; i <= last; i++) {
5855 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5856 entry = NEXT_TX(entry);
5858 pci_unmap_page(tp->pdev,
5859 pci_unmap_addr(&tnapi->tx_buffers[entry],
5861 frag->size, PCI_DMA_TODEVICE);
5865 return NETDEV_TX_OK;
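/* Illustration only: because TG3_TX_RING_SIZE is a power of two, backing the
 * producer index up past slot 0 (as the would_hit_hwbug unwind above does)
 * simply wraps via the mask.  E.g. with entry == 2 and nr_frags == 3,
 * (u32)(2 - 1 - 3) masked with (TG3_TX_RING_SIZE - 1) yields 510.  The
 * helper name is hypothetical.
 */
static inline u32 example_tx_backtrack(u32 entry, u32 nfrags)
{
        return (entry - 1 - nfrags) & (TG3_TX_RING_SIZE - 1);
}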
5868 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
5873 if (new_mtu > ETH_DATA_LEN) {
5874 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
5875 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
5876 ethtool_op_set_tso(dev, 0);
5879 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
5881 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
5882 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
5883 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
5887 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
5889 struct tg3 *tp = netdev_priv(dev);
5892 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
5895 if (!netif_running(dev)) {
5896 /* We'll just catch it later when the device is brought up. */
5899 tg3_set_mtu(dev, tp, new_mtu);
5907 tg3_full_lock(tp, 1);
5909 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
5911 tg3_set_mtu(dev, tp, new_mtu);
5913 err = tg3_restart_hw(tp, 0);
5916 tg3_netif_start(tp);
5918 tg3_full_unlock(tp);
5926 static void tg3_rx_prodring_free(struct tg3 *tp,
5927 struct tg3_rx_prodring_set *tpr)
5931 if (tpr != &tp->prodring[0]) {
5932 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
5933 i = (i + 1) % TG3_RX_RING_SIZE)
5934 tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
5937 if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
5938 for (i = tpr->rx_jmb_cons_idx;
5939 i != tpr->rx_jmb_prod_idx;
5940 i = (i + 1) % TG3_RX_JUMBO_RING_SIZE) {
5941 tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
5949 for (i = 0; i < TG3_RX_RING_SIZE; i++)
5950 tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
5953 if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
5954 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++)
5955 tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
5960 /* Initialize tx/rx rings for packet processing.
5962 * The chip has been shut down and the driver detached from
5963 * the networking, so no interrupts or new tx packets will
5964 * end up in the driver. tp->{tx,}lock are held and thus we may not sleep.
5967 static int tg3_rx_prodring_alloc(struct tg3 *tp,
5968 struct tg3_rx_prodring_set *tpr)
5970 u32 i, rx_pkt_dma_sz;
5972 tpr->rx_std_cons_idx = 0;
5973 tpr->rx_std_prod_idx = 0;
5974 tpr->rx_jmb_cons_idx = 0;
5975 tpr->rx_jmb_prod_idx = 0;
5977 if (tpr != &tp->prodring[0]) {
5978 memset(&tpr->rx_std_buffers[0], 0, TG3_RX_STD_BUFF_RING_SIZE);
5979 if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE)
5980 memset(&tpr->rx_jmb_buffers[0], 0,
5981 TG3_RX_JMB_BUFF_RING_SIZE);
5985 /* Zero out all descriptors. */
5986 memset(tpr->rx_std, 0, TG3_RX_RING_BYTES);
5988 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
5989 if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
5990 tp->dev->mtu > ETH_DATA_LEN)
5991 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
5992 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
5994 /* Initialize invariants of the rings; we only set this
5995 * stuff once. This works because the card does not
5996 * write into the rx buffer posting rings.
5998 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
5999 struct tg3_rx_buffer_desc *rxd;
6001 rxd = &tpr->rx_std[i];
6002 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
6003 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
6004 rxd->opaque = (RXD_OPAQUE_RING_STD |
6005 (i << RXD_OPAQUE_INDEX_SHIFT));
6008 /* Now allocate fresh SKBs for each rx ring. */
6009 for (i = 0; i < tp->rx_pending; i++) {
6010 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
6011 printk(KERN_WARNING PFX
6012 "%s: Using a smaller RX standard ring, "
6013 "only %d out of %d buffers were allocated "
6015 tp->dev->name, i, tp->rx_pending);
6023 if (!(tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE))
6026 memset(tpr->rx_jmb, 0, TG3_RX_JUMBO_RING_BYTES);
6028 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
6029 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
6030 struct tg3_rx_buffer_desc *rxd;
6032 rxd = &tpr->rx_jmb[i].std;
6033 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
6034 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
6036 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
6037 (i << RXD_OPAQUE_INDEX_SHIFT));
6040 for (i = 0; i < tp->rx_jumbo_pending; i++) {
6041 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO,
6043 printk(KERN_WARNING PFX
6044 "%s: Using a smaller RX jumbo ring, "
6045 "only %d out of %d buffers were "
6046 "allocated successfully.\n",
6047 tp->dev->name, i, tp->rx_jumbo_pending);
6050 tp->rx_jumbo_pending = i;
6060 tg3_rx_prodring_free(tp, tpr);
6064 static void tg3_rx_prodring_fini(struct tg3 *tp,
6065 struct tg3_rx_prodring_set *tpr)
6067 kfree(tpr->rx_std_buffers);
6068 tpr->rx_std_buffers = NULL;
6069 kfree(tpr->rx_jmb_buffers);
6070 tpr->rx_jmb_buffers = NULL;
6072 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
6073 tpr->rx_std, tpr->rx_std_mapping);
6077 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
6078 tpr->rx_jmb, tpr->rx_jmb_mapping);
6083 static int tg3_rx_prodring_init(struct tg3 *tp,
6084 struct tg3_rx_prodring_set *tpr)
6086 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE, GFP_KERNEL);
6087 if (!tpr->rx_std_buffers)
6090 tpr->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
6091 &tpr->rx_std_mapping);
6095 if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
6096 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE,
6098 if (!tpr->rx_jmb_buffers)
6101 tpr->rx_jmb = pci_alloc_consistent(tp->pdev,
6102 TG3_RX_JUMBO_RING_BYTES,
6103 &tpr->rx_jmb_mapping);
6111 tg3_rx_prodring_fini(tp, tpr);
6115 /* Free up pending packets in all rx/tx rings.
6117 * The chip has been shut down and the driver detached from
6118 * the networking, so no interrupts or new tx packets will
6119 * end up in the driver. tp->{tx,}lock is not held and we are not
6120 * in an interrupt context and thus may sleep.
6122 static void tg3_free_rings(struct tg3 *tp)
6126 for (j = 0; j < tp->irq_cnt; j++) {
6127 struct tg3_napi *tnapi = &tp->napi[j];
6129 if (!tnapi->tx_buffers)
6132 for (i = 0; i < TG3_TX_RING_SIZE; ) {
6133 struct ring_info *txp;
6134 struct sk_buff *skb;
6137 txp = &tnapi->tx_buffers[i];
6145 pci_unmap_single(tp->pdev,
6146 pci_unmap_addr(txp, mapping),
6153 for (k = 0; k < skb_shinfo(skb)->nr_frags; k++) {
6154 txp = &tnapi->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
6155 pci_unmap_page(tp->pdev,
6156 pci_unmap_addr(txp, mapping),
6157 skb_shinfo(skb)->frags[k].size,
6162 dev_kfree_skb_any(skb);
6165 if (tp->irq_cnt == 1 || j != tp->irq_cnt - 1)
6166 tg3_rx_prodring_free(tp, &tp->prodring[j]);
6170 /* Initialize tx/rx rings for packet processing.
6172 * The chip has been shut down and the driver detached from
6173 * the networking, so no interrupts or new tx packets will
6174 * end up in the driver. tp->{tx,}lock are held and thus we may not sleep.
6177 static int tg3_init_rings(struct tg3 *tp)
6181 /* Free up all the SKBs. */
6184 for (i = 0; i < tp->irq_cnt; i++) {
6185 struct tg3_napi *tnapi = &tp->napi[i];
6187 tnapi->last_tag = 0;
6188 tnapi->last_irq_tag = 0;
6189 tnapi->hw_status->status = 0;
6190 tnapi->hw_status->status_tag = 0;
6191 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6196 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
6198 tnapi->rx_rcb_ptr = 0;
6200 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
6202 if ((tp->irq_cnt == 1 || i != tp->irq_cnt - 1) &&
6203 tg3_rx_prodring_alloc(tp, &tp->prodring[i]))
6211 * Must not be invoked with interrupt sources disabled and
6212 * the hardware shut down.
6214 static void tg3_free_consistent(struct tg3 *tp)
6218 for (i = 0; i < tp->irq_cnt; i++) {
6219 struct tg3_napi *tnapi = &tp->napi[i];
6221 if (tnapi->tx_ring) {
6222 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
6223 tnapi->tx_ring, tnapi->tx_desc_mapping);
6224 tnapi->tx_ring = NULL;
6227 kfree(tnapi->tx_buffers);
6228 tnapi->tx_buffers = NULL;
6230 if (tnapi->rx_rcb) {
6231 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
6233 tnapi->rx_rcb_mapping);
6234 tnapi->rx_rcb = NULL;
6237 if (tnapi->hw_status) {
6238 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
6240 tnapi->status_mapping);
6241 tnapi->hw_status = NULL;
6246 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
6247 tp->hw_stats, tp->stats_mapping);
6248 tp->hw_stats = NULL;
6251 for (i = 0; i < (tp->irq_cnt == 1 ? 1 : tp->irq_cnt - 1); i++)
6252 tg3_rx_prodring_fini(tp, &tp->prodring[i]);
6256 * Must not be invoked with interrupt sources disabled and
6257 * the hardware shut down. Can sleep.
6259 static int tg3_alloc_consistent(struct tg3 *tp)
6263 for (i = 0; i < (tp->irq_cnt == 1 ? 1 : tp->irq_cnt - 1); i++) {
6264 if (tg3_rx_prodring_init(tp, &tp->prodring[i]))
6268 tp->hw_stats = pci_alloc_consistent(tp->pdev,
6269 sizeof(struct tg3_hw_stats),
6270 &tp->stats_mapping);
6274 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
6276 for (i = 0; i < tp->irq_cnt; i++) {
6277 struct tg3_napi *tnapi = &tp->napi[i];
6278 struct tg3_hw_status *sblk;
6280 tnapi->hw_status = pci_alloc_consistent(tp->pdev,
6282 &tnapi->status_mapping);
6283 if (!tnapi->hw_status)
6286 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6287 sblk = tnapi->hw_status;
6289 /* If multivector TSS is enabled, vector 0 does not handle
6290 * tx interrupts. Don't allocate any resources for it.
6292 if ((!i && !(tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)) ||
6293 (i && (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS))) {
6294 tnapi->tx_buffers = kzalloc(sizeof(struct ring_info) *
6297 if (!tnapi->tx_buffers)
6300 tnapi->tx_ring = pci_alloc_consistent(tp->pdev,
6302 &tnapi->tx_desc_mapping);
6303 if (!tnapi->tx_ring)
6308 * When RSS is enabled, the status block format changes
6309 * slightly. The "rx_jumbo_consumer", "reserved",
6310 * and "rx_mini_consumer" members get mapped to the
6311 * other three rx return ring producer indexes.
6315 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
6318 tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
6321 tnapi->rx_rcb_prod_idx = &sblk->reserved;
6324 tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
6328 if (tp->irq_cnt == 1)
6329 tnapi->prodring = &tp->prodring[0];
6331 tnapi->prodring = &tp->prodring[i - 1];
6334 * If multivector RSS is enabled, vector 0 does not handle
6335 * rx or tx interrupts. Don't allocate any resources for it.
6337 if (!i && (tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS))
6340 tnapi->rx_rcb = pci_alloc_consistent(tp->pdev,
6341 TG3_RX_RCB_RING_BYTES(tp),
6342 &tnapi->rx_rcb_mapping);
6346 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
6352 tg3_free_consistent(tp);
6356 #define MAX_WAIT_CNT 1000
6358 /* To stop a block, clear the enable bit and poll till it
6359 * clears. tp->lock is held.
6361 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
6366 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
6373 /* We can't enable/disable these bits of the
6374 * 5705/5750, so just say success.
6387 for (i = 0; i < MAX_WAIT_CNT; i++) {
6390 if ((val & enable_bit) == 0)
6394 if (i == MAX_WAIT_CNT && !silent) {
6395 printk(KERN_ERR PFX "tg3_stop_block timed out, "
6396 "ofs=%lx enable_bit=%x\n",
6404 /* tp->lock is held. */
6405 static int tg3_abort_hw(struct tg3 *tp, int silent)
6409 tg3_disable_ints(tp);
6411 tp->rx_mode &= ~RX_MODE_ENABLE;
6412 tw32_f(MAC_RX_MODE, tp->rx_mode);
6415 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
6416 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
6417 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
6418 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
6419 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
6420 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
6422 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
6423 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
6424 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
6425 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
6426 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
6427 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
6428 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
6430 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
6431 tw32_f(MAC_MODE, tp->mac_mode);
6434 tp->tx_mode &= ~TX_MODE_ENABLE;
6435 tw32_f(MAC_TX_MODE, tp->tx_mode);
6437 for (i = 0; i < MAX_WAIT_CNT; i++) {
6439 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
6442 if (i >= MAX_WAIT_CNT) {
6443 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
6444 "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
6445 tp->dev->name, tr32(MAC_TX_MODE));
6449 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
6450 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
6451 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
6453 tw32(FTQ_RESET, 0xffffffff);
6454 tw32(FTQ_RESET, 0x00000000);
6456 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
6457 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
6459 for (i = 0; i < tp->irq_cnt; i++) {
6460 struct tg3_napi *tnapi = &tp->napi[i];
6461 if (tnapi->hw_status)
6462 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6465 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
6470 static void tg3_ape_send_event(struct tg3 *tp, u32 event)
6475 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
6476 if (apedata != APE_SEG_SIG_MAGIC)
6479 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
6480 if (!(apedata & APE_FW_STATUS_READY))
6483 /* Wait for up to 1 millisecond for APE to service previous event. */
6484 for (i = 0; i < 10; i++) {
6485 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
6488 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
6490 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6491 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
6492 event | APE_EVENT_STATUS_EVENT_PENDING);
6494 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
6496 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6502 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6503 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
6506 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
6511 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
6515 case RESET_KIND_INIT:
6516 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
6517 APE_HOST_SEG_SIG_MAGIC);
6518 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
6519 APE_HOST_SEG_LEN_MAGIC);
6520 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
6521 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
6522 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
6523 APE_HOST_DRIVER_ID_MAGIC);
6524 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
6525 APE_HOST_BEHAV_NO_PHYLOCK);
6527 event = APE_EVENT_STATUS_STATE_START;
6529 case RESET_KIND_SHUTDOWN:
6530 /* With the interface we are currently using,
6531 * APE does not track driver state. Wiping
6532 * out the HOST SEGMENT SIGNATURE forces
6533 * the APE to assume OS absent status.
6535 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
6537 event = APE_EVENT_STATUS_STATE_UNLOAD;
6539 case RESET_KIND_SUSPEND:
6540 event = APE_EVENT_STATUS_STATE_SUSPEND;
6546 event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
6548 tg3_ape_send_event(tp, event);
6551 /* tp->lock is held. */
6552 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
6554 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
6555 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
6557 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
6559 case RESET_KIND_INIT:
6560 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6564 case RESET_KIND_SHUTDOWN:
6565 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6569 case RESET_KIND_SUSPEND:
6570 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6579 if (kind == RESET_KIND_INIT ||
6580 kind == RESET_KIND_SUSPEND)
6581 tg3_ape_driver_state_change(tp, kind);
6584 /* tp->lock is held. */
6585 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
6587 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
6589 case RESET_KIND_INIT:
6590 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6591 DRV_STATE_START_DONE);
6594 case RESET_KIND_SHUTDOWN:
6595 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6596 DRV_STATE_UNLOAD_DONE);
6604 if (kind == RESET_KIND_SHUTDOWN)
6605 tg3_ape_driver_state_change(tp, kind);
6608 /* tp->lock is held. */
6609 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
6611 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
6613 case RESET_KIND_INIT:
6614 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6618 case RESET_KIND_SHUTDOWN:
6619 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6623 case RESET_KIND_SUSPEND:
6624 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6634 static int tg3_poll_fw(struct tg3 *tp)
6639 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6640 /* Wait up to 20ms for init done. */
6641 for (i = 0; i < 200; i++) {
6642 if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
6649 /* Wait for firmware initialization to complete. */
6650 for (i = 0; i < 100000; i++) {
6651 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
6652 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
6657 /* Chip might not be fitted with firmware. Some Sun onboard
6658 * parts are configured like that. So don't signal the timeout
6659 * of the above loop as an error, but do report the lack of
6660 * running firmware once.
6663 !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
6664 tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;
6666 printk(KERN_INFO PFX "%s: No firmware running.\n",
6673 /* Save PCI command register before chip reset */
6674 static void tg3_save_pci_state(struct tg3 *tp)
6676 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
6679 /* Restore PCI state after chip reset */
6680 static void tg3_restore_pci_state(struct tg3 *tp)
6684 /* Re-enable indirect register accesses. */
6685 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
6686 tp->misc_host_ctrl);
6688 /* Set MAX PCI retry to zero. */
6689 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
6690 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
6691 (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
6692 val |= PCISTATE_RETRY_SAME_DMA;
6693 /* Allow reads and writes to the APE register and memory space. */
6694 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
6695 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
6696 PCISTATE_ALLOW_APE_SHMEM_WR;
6697 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
6699 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
6701 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
6702 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
6703 pcie_set_readrq(tp->pdev, 4096);
6705 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
6706 tp->pci_cacheline_sz);
6707 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
6712 /* Make sure PCI-X relaxed ordering bit is clear. */
6713 if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
6716 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
6718 pcix_cmd &= ~PCI_X_CMD_ERO;
6719 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
6723 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
6725 /* Chip reset on 5780 will reset MSI enable bit,
6726 * so we need to restore it.
6728 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6731 pci_read_config_word(tp->pdev,
6732 tp->msi_cap + PCI_MSI_FLAGS,
6734 pci_write_config_word(tp->pdev,
6735 tp->msi_cap + PCI_MSI_FLAGS,
6736 ctrl | PCI_MSI_FLAGS_ENABLE);
6737 val = tr32(MSGINT_MODE);
6738 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
6743 static void tg3_stop_fw(struct tg3 *);
6745 /* tp->lock is held. */
6746 static int tg3_chip_reset(struct tg3 *tp)
6749 void (*write_op)(struct tg3 *, u32, u32);
6754 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
6756 /* No matching tg3_nvram_unlock() after this because
6757 * chip reset below will undo the nvram lock.
6759 tp->nvram_lock_cnt = 0;
6761 /* GRC_MISC_CFG core clock reset will clear the memory
6762 * enable bit in PCI register 4 and the MSI enable bit
6763 * on some chips, so we save relevant registers here.
6765 tg3_save_pci_state(tp);
6767 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
6768 (tp->tg3_flags3 & TG3_FLG3_5755_PLUS))
6769 tw32(GRC_FASTBOOT_PC, 0);
6772 * We must avoid the readl() that normally takes place.
6773 * It locks machines, causes machine checks, and other
6774 * fun things. So, temporarily disable the 5701
6775 * hardware workaround, while we do the reset.
6777 write_op = tp->write32;
6778 if (write_op == tg3_write_flush_reg32)
6779 tp->write32 = tg3_write32;
6781 /* Prevent the irq handler from reading or writing PCI registers
6782 * during chip reset when the memory enable bit in the PCI command
6783 * register may be cleared. The chip does not generate interrupts
6784 * at this time, but the irq handler may still be called due to irq
6785 * sharing or irqpoll.
6787 tp->tg3_flags |= TG3_FLAG_CHIP_RESETTING;
6788 for (i = 0; i < tp->irq_cnt; i++) {
6789 struct tg3_napi *tnapi = &tp->napi[i];
6790 if (tnapi->hw_status) {
6791 tnapi->hw_status->status = 0;
6792 tnapi->hw_status->status_tag = 0;
6794 tnapi->last_tag = 0;
6795 tnapi->last_irq_tag = 0;
6799 for (i = 0; i < tp->irq_cnt; i++)
6800 synchronize_irq(tp->napi[i].irq_vec);
6802 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
6803 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
6804 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
6808 val = GRC_MISC_CFG_CORECLK_RESET;
6810 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
6811 if (tr32(0x7e2c) == 0x60) {
6814 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
6815 tw32(GRC_MISC_CFG, (1 << 29));
6820 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6821 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
6822 tw32(GRC_VCPU_EXT_CTRL,
6823 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
6826 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
6827 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
6828 tw32(GRC_MISC_CFG, val);
6830 /* restore 5701 hardware bug workaround write method */
6831 tp->write32 = write_op;
6833 /* Unfortunately, we have to delay before the PCI read back.
6834 * Some 575X chips will not even respond to a PCI cfg access
6835 * when the reset command is given to the chip.
6837 * How do these hardware designers expect things to work
6838 * properly if the PCI write is posted for a long period
6839 * of time? It is always necessary to have some method by
6840 * which a register read back can occur to push out the write
6841 * which does the reset.
6843 * For most tg3 variants the trick below was working.
6848 /* Flush PCI posted writes. The normal MMIO registers
6849 * are inaccessible at this time so this is the only
6850 * way to do this reliably (actually, this is no longer
6851 * the case, see above). I tried to use indirect
6852 * register read/write but this upset some 5701 variants.
6854 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
6858 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) && tp->pcie_cap) {
6861 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
6865 /* Wait for link training to complete. */
6866 for (i = 0; i < 5000; i++)
6869 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
6870 pci_write_config_dword(tp->pdev, 0xc4,
6871 cfg_val | (1 << 15));
6874 /* Clear the "no snoop" and "relaxed ordering" bits. */
6875 pci_read_config_word(tp->pdev,
6876 tp->pcie_cap + PCI_EXP_DEVCTL,
6878 val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
6879 PCI_EXP_DEVCTL_NOSNOOP_EN);
6881 * Older PCIe devices only support the 128 byte
6882 * MPS setting. Enforce the restriction.
6884 if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
6885 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784))
6886 val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
6887 pci_write_config_word(tp->pdev,
6888 tp->pcie_cap + PCI_EXP_DEVCTL,
6891 pcie_set_readrq(tp->pdev, 4096);
6893 /* Clear error status */
6894 pci_write_config_word(tp->pdev,
6895 tp->pcie_cap + PCI_EXP_DEVSTA,
6896 PCI_EXP_DEVSTA_CED |
6897 PCI_EXP_DEVSTA_NFED |
6898 PCI_EXP_DEVSTA_FED |
6899 PCI_EXP_DEVSTA_URD);
6902 tg3_restore_pci_state(tp);
6904 tp->tg3_flags &= ~TG3_FLAG_CHIP_RESETTING;
6907 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
6908 val = tr32(MEMARB_MODE);
6909 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
6911 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
6913 tw32(0x5000, 0x400);
6916 tw32(GRC_MODE, tp->grc_mode);
6918 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
6921 tw32(0xc4, val | (1 << 15));
6924 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
6925 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6926 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
6927 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
6928 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
6929 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
6932 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6933 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
6934 tw32_f(MAC_MODE, tp->mac_mode);
6935 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
6936 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
6937 tw32_f(MAC_MODE, tp->mac_mode);
6938 } else if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
6939 tp->mac_mode &= (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
6940 if (tp->mac_mode & MAC_MODE_APE_TX_EN)
6941 tp->mac_mode |= MAC_MODE_TDE_ENABLE;
6942 tw32_f(MAC_MODE, tp->mac_mode);
6944 tw32_f(MAC_MODE, 0);
6947 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
6949 err = tg3_poll_fw(tp);
6955 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
6958 phy_addr = tp->phy_addr;
6959 tp->phy_addr = TG3_PHY_PCIE_ADDR;
6961 tg3_writephy(tp, TG3_PCIEPHY_BLOCK_ADDR,
6962 TG3_PCIEPHY_TXB_BLK << TG3_PCIEPHY_BLOCK_SHIFT);
6963 val = TG3_PCIEPHY_TX0CTRL1_TXOCM | TG3_PCIEPHY_TX0CTRL1_RDCTL |
6964 TG3_PCIEPHY_TX0CTRL1_TXCMV | TG3_PCIEPHY_TX0CTRL1_TKSEL |
6965 TG3_PCIEPHY_TX0CTRL1_NB_EN;
6966 tg3_writephy(tp, TG3_PCIEPHY_TX0CTRL1, val);
6969 tg3_writephy(tp, TG3_PCIEPHY_BLOCK_ADDR,
6970 TG3_PCIEPHY_XGXS_BLK1 << TG3_PCIEPHY_BLOCK_SHIFT);
6971 val = TG3_PCIEPHY_PWRMGMT4_LOWPWR_EN |
6972 TG3_PCIEPHY_PWRMGMT4_L1PLLPD_EN;
6973 tg3_writephy(tp, TG3_PCIEPHY_PWRMGMT4, val);
6976 tp->phy_addr = phy_addr;
6979 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
6980 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
6981 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
6982 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
6983 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765) {
6986 tw32(0x7c00, val | (1 << 25));
6989 /* Reprobe ASF enable state. */
6990 tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
6991 tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
6992 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
6993 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
6996 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
6997 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
6998 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
6999 tp->last_event_jiffies = jiffies;
7000 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
7001 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
7008 /* tp->lock is held. */
7009 static void tg3_stop_fw(struct tg3 *tp)
7011 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
7012 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
7013 /* Wait for RX cpu to ACK the previous event. */
7014 tg3_wait_for_event_ack(tp);
7016 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
7018 tg3_generate_fw_event(tp);
7020 /* Wait for RX cpu to ACK this event. */
7021 tg3_wait_for_event_ack(tp);
7025 /* tp->lock is held. */
7026 static int tg3_halt(struct tg3 *tp, int kind, int silent)
7032 tg3_write_sig_pre_reset(tp, kind);
7034 tg3_abort_hw(tp, silent);
7035 err = tg3_chip_reset(tp);
7037 __tg3_set_mac_addr(tp, 0);
7039 tg3_write_sig_legacy(tp, kind);
7040 tg3_write_sig_post_reset(tp, kind);
7048 #define RX_CPU_SCRATCH_BASE 0x30000
7049 #define RX_CPU_SCRATCH_SIZE 0x04000
7050 #define TX_CPU_SCRATCH_BASE 0x34000
7051 #define TX_CPU_SCRATCH_SIZE 0x04000
7053 /* tp->lock is held. */
7054 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
7058 BUG_ON(offset == TX_CPU_BASE &&
7059 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
7061 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7062 u32 val = tr32(GRC_VCPU_EXT_CTRL);
7064 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
7067 if (offset == RX_CPU_BASE) {
7068 for (i = 0; i < 10000; i++) {
7069 tw32(offset + CPU_STATE, 0xffffffff);
7070 tw32(offset + CPU_MODE, CPU_MODE_HALT);
7071 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
7075 tw32(offset + CPU_STATE, 0xffffffff);
7076 tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
7079 for (i = 0; i < 10000; i++) {
7080 tw32(offset + CPU_STATE, 0xffffffff);
7081 tw32(offset + CPU_MODE, CPU_MODE_HALT);
7082 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
7088 printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
7091 (offset == RX_CPU_BASE ? "RX" : "TX"));
7095 /* Clear firmware's nvram arbitration. */
7096 if (tp->tg3_flags & TG3_FLAG_NVRAM)
7097 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
7102 unsigned int fw_base;
7103 unsigned int fw_len;
7104 const __be32 *fw_data;
7107 /* tp->lock is held. */
7108 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
7109 int cpu_scratch_size, struct fw_info *info)
7111 int err, lock_err, i;
7112 void (*write_op)(struct tg3 *, u32, u32);
7114 if (cpu_base == TX_CPU_BASE &&
7115 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7116 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
7117 "TX cpu firmware on %s which is 5705.\n",
7122 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
7123 write_op = tg3_write_mem;
7125 write_op = tg3_write_indirect_reg32;
7127 /* It is possible that bootcode is still loading at this point.
7128 * Get the nvram lock before halting the cpu.
7130 lock_err = tg3_nvram_lock(tp);
7131 err = tg3_halt_cpu(tp, cpu_base);
7133 tg3_nvram_unlock(tp);
7137 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
7138 write_op(tp, cpu_scratch_base + i, 0);
7139 tw32(cpu_base + CPU_STATE, 0xffffffff);
7140 tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
7141 for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
7142 write_op(tp, (cpu_scratch_base +
7143 (info->fw_base & 0xffff) +
7145 be32_to_cpu(info->fw_data[i]));
7153 /* tp->lock is held. */
7154 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
7156 struct fw_info info;
7157 const __be32 *fw_data;
7160 fw_data = (void *)tp->fw->data;
7162 /* Firmware blob starts with version numbers, followed by
7163 start address and length. We are setting complete length.
7164 length = end_address_of_bss - start_address_of_text.
7165 Remainder is the blob to be loaded contiguously
7166 from start address. */
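/* For reference, a minimal sketch of the blob layout as assumed by the
 * parsing below and by tg3_request_firmware():
 *
 *   fw_data[0]    version word
 *   fw_data[1]    load (start) address in NIC memory
 *   fw_data[2]    full length, text through end of bss
 *   fw_data[3..]  image bytes copied contiguously from the start address
 *
 * so tp->fw->size - 12 skips the three header words and gives the number
 * of image bytes actually present in the file.
 */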
7168 info.fw_base = be32_to_cpu(fw_data[1]);
7169 info.fw_len = tp->fw->size - 12;
7170 info.fw_data = &fw_data[3];
7172 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
7173 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
7178 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
7179 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
7184 /* Now startup only the RX cpu. */
7185 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7186 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
7188 for (i = 0; i < 5; i++) {
7189 if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
7191 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7192 tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
7193 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
7197 printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
7198 "to set RX CPU PC, is %08x should be %08x\n",
7199 tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
7203 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7204 tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
7209 /* 5705 needs a special version of the TSO firmware. */
7211 /* tp->lock is held. */
7212 static int tg3_load_tso_firmware(struct tg3 *tp)
7214 struct fw_info info;
7215 const __be32 *fw_data;
7216 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
7219 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
7222 fw_data = (void *)tp->fw->data;
7224 /* Firmware blob starts with version numbers, followed by
7225 start address and length. We are setting complete length.
7226 length = end_address_of_bss - start_address_of_text.
7227 Remainder is the blob to be loaded contiguously
7228 from start address. */
7230 info.fw_base = be32_to_cpu(fw_data[1]);
7231 cpu_scratch_size = tp->fw_len;
7232 info.fw_len = tp->fw->size - 12;
7233 info.fw_data = &fw_data[3];
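/* Note (cross-reference): on 5705-class chips the blob is loaded into
 * the RX CPU and staged in part of the on-chip MBUF pool, which is why
 * the BUFMGR setup in tg3_reset_hw() moves the pool base up by fw_len
 * on TSO-capable chips; other chips use the dedicated TX CPU scratch
 * area instead.
 */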
7235 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7236 cpu_base = RX_CPU_BASE;
7237 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
7239 cpu_base = TX_CPU_BASE;
7240 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
7241 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
7244 err = tg3_load_firmware_cpu(tp, cpu_base,
7245 cpu_scratch_base, cpu_scratch_size,
7250 /* Now startup the cpu. */
7251 tw32(cpu_base + CPU_STATE, 0xffffffff);
7252 tw32_f(cpu_base + CPU_PC, info.fw_base);
7254 for (i = 0; i < 5; i++) {
7255 if (tr32(cpu_base + CPU_PC) == info.fw_base)
7257 tw32(cpu_base + CPU_STATE, 0xffffffff);
7258 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
7259 tw32_f(cpu_base + CPU_PC, info.fw_base);
7263 printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s "
7264 "to set CPU PC, is %08x should be %08x\n",
7265 tp->dev->name, tr32(cpu_base + CPU_PC),
7269 tw32(cpu_base + CPU_STATE, 0xffffffff);
7270 tw32_f(cpu_base + CPU_MODE, 0x00000000);
7275 static int tg3_set_mac_addr(struct net_device *dev, void *p)
7277 struct tg3 *tp = netdev_priv(dev);
7278 struct sockaddr *addr = p;
7279 int err = 0, skip_mac_1 = 0;
7281 if (!is_valid_ether_addr(addr->sa_data))
7284 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7286 if (!netif_running(dev))
7289 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
7290 u32 addr0_high, addr0_low, addr1_high, addr1_low;
7292 addr0_high = tr32(MAC_ADDR_0_HIGH);
7293 addr0_low = tr32(MAC_ADDR_0_LOW);
7294 addr1_high = tr32(MAC_ADDR_1_HIGH);
7295 addr1_low = tr32(MAC_ADDR_1_LOW);
7297 /* Skip MAC addr 1 if ASF is using it. */
7298 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
7299 !(addr1_high == 0 && addr1_low == 0))
7302 spin_lock_bh(&tp->lock);
7303 __tg3_set_mac_addr(tp, skip_mac_1);
7304 spin_unlock_bh(&tp->lock);
7309 /* tp->lock is held. */
7310 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
7311 dma_addr_t mapping, u32 maxlen_flags,
7315 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
7316 ((u64) mapping >> 32));
7318 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
7319 ((u64) mapping & 0xffffffff));
7321 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
7324 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7326 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
7330 static void __tg3_set_rx_mode(struct net_device *);
7331 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
7335 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)) {
7336 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
7337 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
7338 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
7340 tw32(HOSTCC_TXCOL_TICKS, 0);
7341 tw32(HOSTCC_TXMAX_FRAMES, 0);
7342 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
7345 if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSIX)) {
7346 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
7347 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
7348 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
7350 tw32(HOSTCC_RXCOL_TICKS, 0);
7351 tw32(HOSTCC_RXMAX_FRAMES, 0);
7352 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
7355 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7356 u32 val = ec->stats_block_coalesce_usecs;
7358 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
7359 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
7361 if (!netif_carrier_ok(tp->dev))
7364 tw32(HOSTCC_STAT_COAL_TICKS, val);
7367 for (i = 0; i < tp->irq_cnt - 1; i++) {
7370 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
7371 tw32(reg, ec->rx_coalesce_usecs);
7372 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
7373 tw32(reg, ec->rx_max_coalesced_frames);
7374 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
7375 tw32(reg, ec->rx_max_coalesced_frames_irq);
7377 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS) {
7378 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
7379 tw32(reg, ec->tx_coalesce_usecs);
7380 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
7381 tw32(reg, ec->tx_max_coalesced_frames);
7382 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
7383 tw32(reg, ec->tx_max_coalesced_frames_irq);
7387 for (; i < tp->irq_max - 1; i++) {
7388 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
7389 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
7390 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
7392 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS) {
7393 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
7394 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
7395 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
7400 /* tp->lock is held. */
7401 static void tg3_rings_reset(struct tg3 *tp)
7404 u32 stblk, txrcb, rxrcb, limit;
7405 struct tg3_napi *tnapi = &tp->napi[0];
7407 /* Disable all transmit rings but the first. */
7408 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7409 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
7410 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
7411 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
7413 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
7415 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
7416 txrcb < limit; txrcb += TG3_BDINFO_SIZE)
7417 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
7418 BDINFO_FLAGS_DISABLED);
7421 /* Disable all receive return rings but the first. */
7422 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
7423 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
7424 else if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7425 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
7426 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
7427 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
7428 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
7430 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
7432 for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
7433 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
7434 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
7435 BDINFO_FLAGS_DISABLED);
7437 /* Disable interrupts */
7438 tw32_mailbox_f(tp->napi[0].int_mbox, 1);
7440 /* Zero mailbox registers. */
7441 if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX) {
7442 for (i = 1; i < TG3_IRQ_MAX_VECS; i++) {
7443 tp->napi[i].tx_prod = 0;
7444 tp->napi[i].tx_cons = 0;
7445 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
7446 tw32_mailbox(tp->napi[i].prodmbox, 0);
7447 tw32_rx_mbox(tp->napi[i].consmbox, 0);
7448 tw32_mailbox_f(tp->napi[i].int_mbox, 1);
7450 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS))
7451 tw32_mailbox(tp->napi[0].prodmbox, 0);
7453 tp->napi[0].tx_prod = 0;
7454 tp->napi[0].tx_cons = 0;
7455 tw32_mailbox(tp->napi[0].prodmbox, 0);
7456 tw32_rx_mbox(tp->napi[0].consmbox, 0);
7459 /* Make sure the NIC-based send BD rings are disabled. */
7460 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7461 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
7462 for (i = 0; i < 16; i++)
7463 tw32_tx_mbox(mbox + i * 8, 0);
7466 txrcb = NIC_SRAM_SEND_RCB;
7467 rxrcb = NIC_SRAM_RCV_RET_RCB;
7469 /* Clear status block in ram. */
7470 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7472 /* Set status block DMA address */
7473 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7474 ((u64) tnapi->status_mapping >> 32));
7475 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7476 ((u64) tnapi->status_mapping & 0xffffffff));
7478 if (tnapi->tx_ring) {
7479 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
7480 (TG3_TX_RING_SIZE <<
7481 BDINFO_FLAGS_MAXLEN_SHIFT),
7482 NIC_SRAM_TX_BUFFER_DESC);
7483 txrcb += TG3_BDINFO_SIZE;
7486 if (tnapi->rx_rcb) {
7487 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
7488 (TG3_RX_RCB_RING_SIZE(tp) <<
7489 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
7490 rxrcb += TG3_BDINFO_SIZE;
7493 stblk = HOSTCC_STATBLCK_RING1;
7495 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
7496 u64 mapping = (u64)tnapi->status_mapping;
7497 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
7498 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
7500 /* Clear status block in ram. */
7501 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7503 if (tnapi->tx_ring) {
7504 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
7505 (TG3_TX_RING_SIZE <<
7506 BDINFO_FLAGS_MAXLEN_SHIFT),
7507 NIC_SRAM_TX_BUFFER_DESC);
7508 txrcb += TG3_BDINFO_SIZE;
7511 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
7512 (TG3_RX_RCB_RING_SIZE(tp) <<
7513 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
7516 rxrcb += TG3_BDINFO_SIZE;
7520 /* tp->lock is held. */
7521 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7523 u32 val, rdmac_mode;
7525 struct tg3_rx_prodring_set *tpr = &tp->prodring[0];
7527 tg3_disable_ints(tp);
7531 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
7533 if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
7534 tg3_abort_hw(tp, 1);
7538 !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB))
7541 err = tg3_chip_reset(tp);
7545 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
7547 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
7548 val = tr32(TG3_CPMU_CTRL);
7549 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
7550 tw32(TG3_CPMU_CTRL, val);
7552 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
7553 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
7554 val |= CPMU_LSPD_10MB_MACCLK_6_25;
7555 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
7557 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
7558 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
7559 val |= CPMU_LNK_AWARE_MACCLK_6_25;
7560 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
7562 val = tr32(TG3_CPMU_HST_ACC);
7563 val &= ~CPMU_HST_ACC_MACCLK_MASK;
7564 val |= CPMU_HST_ACC_MACCLK_6_25;
7565 tw32(TG3_CPMU_HST_ACC, val);
7568 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7569 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
7570 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
7571 PCIE_PWR_MGMT_L1_THRESH_4MS;
7572 tw32(PCIE_PWR_MGMT_THRESH, val);
7574 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
7575 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
7577 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
7579 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
7580 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
7583 if (tp->tg3_flags3 & TG3_FLG3_L1PLLPD_EN) {
7584 u32 grc_mode = tr32(GRC_MODE);
7586 /* Access the lower 1K of PL PCIE block registers. */
7587 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
7588 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
7590 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
7591 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
7592 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
7594 tw32(GRC_MODE, grc_mode);
7597 /* This works around an issue with Athlon chipsets on
7598 * B3 tigon3 silicon. This bit has no effect on any
7599 * other revision. But do not set this on PCI Express
7600 * chips and don't even touch the clocks if the CPMU is present.
7602 if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)) {
7603 if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
7604 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
7605 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7608 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7609 (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
7610 val = tr32(TG3PCI_PCISTATE);
7611 val |= PCISTATE_RETRY_SAME_DMA;
7612 tw32(TG3PCI_PCISTATE, val);
7615 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
7616 /* Allow reads and writes to the
7617 * APE register and memory space.
7619 val = tr32(TG3PCI_PCISTATE);
7620 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7621 PCISTATE_ALLOW_APE_SHMEM_WR;
7622 tw32(TG3PCI_PCISTATE, val);
7625 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
7626 /* Enable some hw fixes. */
7627 val = tr32(TG3PCI_MSI_DATA);
7628 val |= (1 << 26) | (1 << 28) | (1 << 29);
7629 tw32(TG3PCI_MSI_DATA, val);
7632 /* Descriptor ring init may access the NIC SRAM
7633 * area to set up the TX descriptors, so we
7634 * can only do this after the hardware has been
7635 * successfully reset.
7637 err = tg3_init_rings(tp);
7641 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
7642 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
7643 val = tr32(TG3PCI_DMA_RW_CTRL) &
7644 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
7645 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
7646 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
7647 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
7648 /* This value is determined during the probe time DMA
7649 * engine test, tg3_test_dma.
7651 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
7654 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
7655 GRC_MODE_4X_NIC_SEND_RINGS |
7656 GRC_MODE_NO_TX_PHDR_CSUM |
7657 GRC_MODE_NO_RX_PHDR_CSUM);
7658 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
7660 /* Pseudo-header checksum is done by hardware logic and not
7661 * the offload processors, so make the chip do the pseudo-
7662 * header checksums on receive. For transmit it is more
7663 * convenient to do the pseudo-header checksum in software
7664 * as Linux does that on transmit for us in all cases.
7666 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
7670 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
7672 /* Setup the timer prescaler register. Clock is always 66 MHz. */
7673 val = tr32(GRC_MISC_CFG);
7675 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
7676 tw32(GRC_MISC_CFG, val);
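/* The value 65 above is presumably chosen so that the 66 MHz core
 * clock is divided down to a 1 MHz timer tick (assuming the divider
 * is prescaler + 1), which would let the host coalescing tick values
 * be programmed directly in microseconds (see __tg3_set_coalesce()).
 * This is an inference from the numbers, not something stated in the
 * code.
 */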
7678 /* Initialize MBUF/DESC pool. */
7679 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
7681 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
7682 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
7683 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
7684 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
7686 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
7687 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
7688 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
7690 else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
7693 fw_len = tp->fw_len;
7694 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
7695 tw32(BUFMGR_MB_POOL_ADDR,
7696 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
7697 tw32(BUFMGR_MB_POOL_SIZE,
7698 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
7701 if (tp->dev->mtu <= ETH_DATA_LEN) {
7702 tw32(BUFMGR_MB_RDMA_LOW_WATER,
7703 tp->bufmgr_config.mbuf_read_dma_low_water);
7704 tw32(BUFMGR_MB_MACRX_LOW_WATER,
7705 tp->bufmgr_config.mbuf_mac_rx_low_water);
7706 tw32(BUFMGR_MB_HIGH_WATER,
7707 tp->bufmgr_config.mbuf_high_water);
7709 tw32(BUFMGR_MB_RDMA_LOW_WATER,
7710 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
7711 tw32(BUFMGR_MB_MACRX_LOW_WATER,
7712 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
7713 tw32(BUFMGR_MB_HIGH_WATER,
7714 tp->bufmgr_config.mbuf_high_water_jumbo);
7716 tw32(BUFMGR_DMA_LOW_WATER,
7717 tp->bufmgr_config.dma_low_water);
7718 tw32(BUFMGR_DMA_HIGH_WATER,
7719 tp->bufmgr_config.dma_high_water);
7721 tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
7722 for (i = 0; i < 2000; i++) {
7723 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
7728 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
7733 /* Setup replenish threshold. */
7734 val = tp->rx_pending / 8;
7737 else if (val > tp->rx_std_max_post)
7738 val = tp->rx_std_max_post;
7739 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7740 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
7741 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
7743 if (val > (TG3_RX_INTERNAL_RING_SZ_5906 / 2))
7744 val = TG3_RX_INTERNAL_RING_SZ_5906 / 2;
7747 tw32(RCVBDI_STD_THRESH, val);
7749 /* Initialize TG3_BDINFO's at:
7750 * RCVDBDI_STD_BD: standard eth size rx ring
7751 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
7752 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
7755 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
7756 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
7757 * ring attribute flags
7758 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
7760 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
7761 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
7763 * The size of each ring is fixed in the firmware, but the location is
7766 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
7767 ((u64) tpr->rx_std_mapping >> 32));
7768 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
7769 ((u64) tpr->rx_std_mapping & 0xffffffff));
7770 if (!(tp->tg3_flags3 & TG3_FLG3_5755_PLUS))
7771 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
7772 NIC_SRAM_RX_BUFFER_DESC);
7774 /* Disable the mini ring */
7775 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7776 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
7777 BDINFO_FLAGS_DISABLED);
7779 /* Program the jumbo buffer descriptor ring control
7780 * blocks on those devices that have them.
7782 if ((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) &&
7783 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
7784 /* Setup replenish threshold. */
7785 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
7787 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
7788 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
7789 ((u64) tpr->rx_jmb_mapping >> 32));
7790 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
7791 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
7792 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
7793 (RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT) |
7794 BDINFO_FLAGS_USE_EXT_RECV);
7795 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
7796 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
7797 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
7799 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
7800 BDINFO_FLAGS_DISABLED);
7803 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
7804 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
7805 val = (RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT) |
7806 (RX_STD_MAX_SIZE << 2);
7808 val = RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT;
7810 val = RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT;
7812 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
7814 tpr->rx_std_prod_idx = tp->rx_pending;
7815 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
7817 tpr->rx_jmb_prod_idx = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
7818 tp->rx_jumbo_pending : 0;
7819 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
7821 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
7822 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
7823 tw32(STD_REPLENISH_LWM, 32);
7824 tw32(JMB_REPLENISH_LWM, 16);
7827 tg3_rings_reset(tp);
7829 /* Initialize MAC address and backoff seed. */
7830 __tg3_set_mac_addr(tp, 0);
7832 /* MTU + ethernet header + FCS + optional VLAN tag */
7833 tw32(MAC_RX_MTU_SIZE,
7834 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
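/* e.g. with the standard 1500-byte MTU this programs
 * 1500 + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) + 4 (VLAN_HLEN) = 1522 bytes.
 */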
7836 /* The slot time is changed by tg3_setup_phy if we
7837 * run at gigabit with half duplex.
7839 tw32(MAC_TX_LENGTHS,
7840 (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
7841 (6 << TX_LENGTHS_IPG_SHIFT) |
7842 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
7844 /* Receive rules. */
7845 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
7846 tw32(RCVLPC_CONFIG, 0x0181);
7848 /* Calculate RDMAC_MODE setting early; we need it to determine
7849 * the RCVLPC_STATE_ENABLE mask.
7851 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
7852 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
7853 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
7854 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
7855 RDMAC_MODE_LNGREAD_ENAB);
7857 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
7858 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
7859 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
7860 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
7861 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
7862 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
7864 /* If statement applies to 5705 and 5750 PCI devices only */
7865 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
7866 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
7867 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
7868 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
7869 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7870 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
7871 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
7872 !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
7873 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
7877 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
7878 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
7880 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
7881 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
7883 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) ||
7884 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
7885 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
7886 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
7888 /* Receive/send statistics. */
7889 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
7890 val = tr32(RCVLPC_STATS_ENABLE);
7891 val &= ~RCVLPC_STATSENAB_DACK_FIX;
7892 tw32(RCVLPC_STATS_ENABLE, val);
7893 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
7894 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
7895 val = tr32(RCVLPC_STATS_ENABLE);
7896 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
7897 tw32(RCVLPC_STATS_ENABLE, val);
7899 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
7901 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
7902 tw32(SNDDATAI_STATSENAB, 0xffffff);
7903 tw32(SNDDATAI_STATSCTRL,
7904 (SNDDATAI_SCTRL_ENABLE |
7905 SNDDATAI_SCTRL_FASTUPD));
7907 /* Setup host coalescing engine. */
7908 tw32(HOSTCC_MODE, 0);
7909 for (i = 0; i < 2000; i++) {
7910 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
7915 __tg3_set_coalesce(tp, &tp->coal);
7917 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7918 /* Status/statistics block address. See tg3_timer,
7919 * the tg3_periodic_fetch_stats call there, and
7920 * tg3_get_stats to see how this works for 5705/5750 chips.
7922 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7923 ((u64) tp->stats_mapping >> 32));
7924 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7925 ((u64) tp->stats_mapping & 0xffffffff));
7926 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
7928 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
7930 /* Clear statistics and status block memory areas */
7931 for (i = NIC_SRAM_STATS_BLK;
7932 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
7934 tg3_write_mem(tp, i, 0);
7939 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
7941 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
7942 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
7943 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7944 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
7946 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
7947 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
7948 /* reset to prevent losing 1st rx packet intermittently */
7949 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7953 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
7954 tp->mac_mode &= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
7957 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
7958 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
7959 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
7960 !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7961 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
7962 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
7963 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
7966 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
7967 * If TG3_FLG2_IS_NIC is zero, we should read the
7968 * register to preserve the GPIO settings for LOMs. The GPIOs,
7969 * whether used as inputs or outputs, are set by boot code after
7972 if (!(tp->tg3_flags2 & TG3_FLG2_IS_NIC)) {
7975 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
7976 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
7977 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
7979 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
7980 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
7981 GRC_LCLCTRL_GPIO_OUTPUT3;
7983 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
7984 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
7986 tp->grc_local_ctrl &= ~gpio_mask;
7987 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
7989 /* GPIO1 must be driven high for eeprom write protect */
7990 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)
7991 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
7992 GRC_LCLCTRL_GPIO_OUTPUT1);
7994 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
7997 if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX) {
7998 val = tr32(MSGINT_MODE);
7999 val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE;
8000 tw32(MSGINT_MODE, val);
8003 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
8004 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
8008 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
8009 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
8010 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
8011 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
8012 WDMAC_MODE_LNGREAD_ENAB);
8014 /* If statement applies to 5705 and 5750 PCI devices only */
8015 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8016 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
8017 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
8018 if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
8019 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
8020 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
8022 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8023 !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
8024 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
8025 val |= WDMAC_MODE_RX_ACCEL;
8029 /* Enable host coalescing bug fix */
8030 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
8031 val |= WDMAC_MODE_STATUS_TAG_FIX;
8033 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
8034 val |= WDMAC_MODE_BURST_ALL_DATA;
8036 tw32_f(WDMAC_MODE, val);
8039 if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
8042 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8044 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
8045 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
8046 pcix_cmd |= PCI_X_CMD_READ_2K;
8047 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
8048 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
8049 pcix_cmd |= PCI_X_CMD_READ_2K;
8051 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8055 tw32_f(RDMAC_MODE, rdmac_mode);
8058 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
8059 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
8060 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
8062 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
8064 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
8066 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
8068 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
8069 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
8070 tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
8071 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
8072 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
8073 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
8074 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
8075 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
8076 val |= SNDBDI_MODE_MULTI_TXQ_EN;
8077 tw32(SNDBDI_MODE, val);
8078 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
8080 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
8081 err = tg3_load_5701_a0_firmware_fix(tp);
8086 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
8087 err = tg3_load_tso_firmware(tp);
8092 tp->tx_mode = TX_MODE_ENABLE;
8093 tw32_f(MAC_TX_MODE, tp->tx_mode);
8096 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS) {
8097 u32 reg = MAC_RSS_INDIR_TBL_0;
8098 u8 *ent = (u8 *)&val;
8100 /* Setup the indirection table */
8101 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
8102 int idx = i % sizeof(val);
8104 ent[idx] = i % (tp->irq_cnt - 1);
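/* Illustrative example (made-up vector count): with tp->irq_cnt == 5,
 * i.e. four RSS rx rings, the table entries cycle 0,1,2,3,0,1,... and
 * each group of four bytes is packed into a single 32-bit register
 * write when idx wraps below.
 */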
8105 if (idx == sizeof(val) - 1) {
8111 /* Setup the "secret" hash key. */
8112 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
8113 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
8114 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
8115 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
8116 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
8117 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
8118 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
8119 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
8120 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
8121 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
8124 tp->rx_mode = RX_MODE_ENABLE;
8125 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
8126 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
8128 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS)
8129 tp->rx_mode |= RX_MODE_RSS_ENABLE |
8130 RX_MODE_RSS_ITBL_HASH_BITS_7 |
8131 RX_MODE_RSS_IPV6_HASH_EN |
8132 RX_MODE_RSS_TCP_IPV6_HASH_EN |
8133 RX_MODE_RSS_IPV4_HASH_EN |
8134 RX_MODE_RSS_TCP_IPV4_HASH_EN;
8136 tw32_f(MAC_RX_MODE, tp->rx_mode);
8139 tw32(MAC_LED_CTRL, tp->led_ctrl);
8141 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
8142 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
8143 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8146 tw32_f(MAC_RX_MODE, tp->rx_mode);
8149 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
8150 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
8151 !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
8152 /* Set drive transmission level to 1.2V */
8153 /* only if the signal pre-emphasis bit is not set */
8154 val = tr32(MAC_SERDES_CFG);
8157 tw32(MAC_SERDES_CFG, val);
8159 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
8160 tw32(MAC_SERDES_CFG, 0x616000);
8163 /* Prevent chip from dropping frames when flow control
8166 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8170 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
8172 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
8173 (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
8174 /* Use hardware link auto-negotiation */
8175 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
8178 if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
8179 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
8182 tmp = tr32(SERDES_RX_CTRL);
8183 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
8184 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
8185 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
8186 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8189 if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
8190 if (tp->link_config.phy_is_low_power) {
8191 tp->link_config.phy_is_low_power = 0;
8192 tp->link_config.speed = tp->link_config.orig_speed;
8193 tp->link_config.duplex = tp->link_config.orig_duplex;
8194 tp->link_config.autoneg = tp->link_config.orig_autoneg;
8197 err = tg3_setup_phy(tp, 0);
8201 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
8202 !(tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET)) {
8205 /* Clear CRC stats. */
8206 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
8207 tg3_writephy(tp, MII_TG3_TEST1,
8208 tmp | MII_TG3_TEST1_CRC_EN);
8209 tg3_readphy(tp, 0x14, &tmp);
8214 __tg3_set_rx_mode(tp->dev);
8216 /* Initialize receive rules. */
8217 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
8218 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
8219 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
8220 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
8222 if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
8223 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
8227 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
8231 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
8233 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
8235 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
8237 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
8239 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
8241 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
8243 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
8245 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
8247 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
8249 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
8251 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
8253 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
8255 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
8257 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
8265 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
8266 /* Write our heartbeat update interval to APE. */
8267 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
8268 APE_HOST_HEARTBEAT_INT_DISABLE);
8270 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
8275 /* Called at device open time to get the chip ready for
8276 * packet processing. Invoked with tp->lock held.
8278 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
8280 tg3_switch_clocks(tp);
8282 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
8284 return tg3_reset_hw(tp, reset_phy);
8287 #define TG3_STAT_ADD32(PSTAT, REG) \
8288 do { u32 __val = tr32(REG); \
8289 (PSTAT)->low += __val; \
8290 if ((PSTAT)->low < __val) \
8291 (PSTAT)->high += 1; \
8292 } while (0)
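/* Illustrative behaviour (made-up numbers): if the 32-bit hardware
 * counter read returns 0x10 while PSTAT holds {low = 0xfffffff8,
 * high = 0}, low wraps to 0x00000008, the wrap is detected by the
 * comparison above, and high is bumped to 1 -- accumulating the
 * 32-bit register into a 64-bit software counter.
 */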
8294 static void tg3_periodic_fetch_stats(struct tg3 *tp)
8296 struct tg3_hw_stats *sp = tp->hw_stats;
8298 if (!netif_carrier_ok(tp->dev))
8301 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
8302 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
8303 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
8304 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
8305 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
8306 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
8307 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
8308 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
8309 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
8310 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
8311 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
8312 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
8313 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
8315 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
8316 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
8317 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
8318 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
8319 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
8320 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
8321 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
8322 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
8323 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
8324 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
8325 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
8326 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
8327 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
8328 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
8330 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
8331 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
8332 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
8335 static void tg3_timer(unsigned long __opaque)
8337 struct tg3 *tp = (struct tg3 *) __opaque;
8342 spin_lock(&tp->lock);
8344 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
8345 /* All of this garbage is because, when using non-tagged
8346 * IRQ status, the mailbox/status_block protocol the chip
8347 * uses with the cpu is race prone.
8349 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
8350 tw32(GRC_LOCAL_CTRL,
8351 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
8353 tw32(HOSTCC_MODE, tp->coalesce_mode |
8354 HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
8357 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
8358 tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
8359 spin_unlock(&tp->lock);
8360 schedule_work(&tp->reset_task);
8365 /* This part only runs once per second. */
8366 if (!--tp->timer_counter) {
8367 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
8368 tg3_periodic_fetch_stats(tp);
8370 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
8374 mac_stat = tr32(MAC_STATUS);
8377 if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
8378 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
8380 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
8384 tg3_setup_phy(tp, 0);
8385 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
8386 u32 mac_stat = tr32(MAC_STATUS);
8389 if (netif_carrier_ok(tp->dev) &&
8390 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
8393 if (! netif_carrier_ok(tp->dev) &&
8394 (mac_stat & (MAC_STATUS_PCS_SYNCED |
8395 MAC_STATUS_SIGNAL_DET))) {
8399 if (!tp->serdes_counter) {
8402 ~MAC_MODE_PORT_MODE_MASK));
8404 tw32_f(MAC_MODE, tp->mac_mode);
8407 tg3_setup_phy(tp, 0);
8409 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
8410 tg3_serdes_parallel_detect(tp);
8412 tp->timer_counter = tp->timer_multiplier;
8415 /* Heartbeat is only sent once every 2 seconds.
8417 * The heartbeat is to tell the ASF firmware that the host
8418 * driver is still alive. In the event that the OS crashes,
8419 * ASF needs to reset the hardware to free up the FIFO space
8420 * that may be filled with rx packets destined for the host.
8421 * If the FIFO is full, ASF will no longer function properly.
8423 * Unintended resets have been reported on real-time kernels
8424 * where the timer doesn't run on time. Netpoll will also have
8427 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
8428 * to check the ring condition when the heartbeat is expiring
8429 * before doing the reset. This will prevent most unintended
8432 if (!--tp->asf_counter) {
8433 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
8434 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
8435 tg3_wait_for_event_ack(tp);
8437 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
8438 FWCMD_NICDRV_ALIVE3);
8439 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
8440 /* 5-second timeout */
8441 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
8443 tg3_generate_fw_event(tp);
8445 tp->asf_counter = tp->asf_multiplier;
8448 spin_unlock(&tp->lock);
8451 tp->timer.expires = jiffies + tp->timer_offset;
8452 add_timer(&tp->timer);
8455 static int tg3_request_irq(struct tg3 *tp, int irq_num)
8458 unsigned long flags;
8460 struct tg3_napi *tnapi = &tp->napi[irq_num];
8462 if (tp->irq_cnt == 1)
8463 name = tp->dev->name;
8465 name = &tnapi->irq_lbl[0];
8466 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
8467 name[IFNAMSIZ-1] = 0;
8470 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI_OR_MSIX) {
8472 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
8474 flags = IRQF_SAMPLE_RANDOM;
8477 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
8478 fn = tg3_interrupt_tagged;
8479 flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM;
8482 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
8485 static int tg3_test_interrupt(struct tg3 *tp)
8487 struct tg3_napi *tnapi = &tp->napi[0];
8488 struct net_device *dev = tp->dev;
8489 int err, i, intr_ok = 0;
8492 if (!netif_running(dev))
8495 tg3_disable_ints(tp);
8497 free_irq(tnapi->irq_vec, tnapi);
8500 * Turn off MSI one shot mode. Otherwise this test has no
8501 * way to observe whether the interrupt was delivered.
8503 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8504 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) &&
8505 (tp->tg3_flags2 & TG3_FLG2_USING_MSI)) {
8506 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
8507 tw32(MSGINT_MODE, val);
8510 err = request_irq(tnapi->irq_vec, tg3_test_isr,
8511 IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, tnapi);
8515 tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
8516 tg3_enable_ints(tp);
8518 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
8521 for (i = 0; i < 5; i++) {
8522 u32 int_mbox, misc_host_ctrl;
8524 int_mbox = tr32_mailbox(tnapi->int_mbox);
8525 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
8527 if ((int_mbox != 0) ||
8528 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
8536 tg3_disable_ints(tp);
8538 free_irq(tnapi->irq_vec, tnapi);
8540 err = tg3_request_irq(tp, 0);
8546 /* Reenable MSI one shot mode. */
8547 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8548 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) &&
8549 (tp->tg3_flags2 & TG3_FLG2_USING_MSI)) {
8550 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
8551 tw32(MSGINT_MODE, val);
8559 /* Returns 0 if the MSI test succeeds, or if it fails and INTx mode is
8560 * successfully restored
8562 static int tg3_test_msi(struct tg3 *tp)
8567 if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
8570 /* Turn off SERR reporting in case MSI terminates with Master
8573 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
8574 pci_write_config_word(tp->pdev, PCI_COMMAND,
8575 pci_cmd & ~PCI_COMMAND_SERR);
8577 err = tg3_test_interrupt(tp);
8579 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
8584 /* other failures */
8588 /* MSI test failed, go back to INTx mode */
8589 printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
8590 "switching to INTx mode. Please report this failure to "
8591 "the PCI maintainer and include system chipset information.\n",
8594 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
8596 pci_disable_msi(tp->pdev);
8598 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
8600 err = tg3_request_irq(tp, 0);
8604 /* Need to reset the chip because the MSI cycle may have terminated
8605 * with Master Abort.
8607 tg3_full_lock(tp, 1);
8609 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8610 err = tg3_init_hw(tp, 1);
8612 tg3_full_unlock(tp);
8615 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
8620 static int tg3_request_firmware(struct tg3 *tp)
8622 const __be32 *fw_data;
8624 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
8625 printk(KERN_ERR "%s: Failed to load firmware \"%s\"\n",
8626 tp->dev->name, tp->fw_needed);
8630 fw_data = (void *)tp->fw->data;
8632 /* Firmware blob starts with version numbers, followed by
8633 * start address and _full_ length including BSS sections
8634 * (which must be longer than the actual data, of course
8637 tp->fw_len = be32_to_cpu(fw_data[2]); /* includes bss */
8638 if (tp->fw_len < (tp->fw->size - 12)) {
8639 printk(KERN_ERR "%s: bogus length %d in \"%s\"\n",
8640 tp->dev->name, tp->fw_len, tp->fw_needed);
8641 release_firmware(tp->fw);
8646 /* We no longer need firmware; we have it. */
8647 tp->fw_needed = NULL;
8651 static bool tg3_enable_msix(struct tg3 *tp)
8653 int i, rc, cpus = num_online_cpus();
8654 struct msix_entry msix_ent[tp->irq_max];
8657 /* Just fall back to the simpler MSI mode. */
8661 * We want as many rx rings enabled as there are cpus.
8662 * The first MSIX vector only deals with link interrupts, etc,
8663 * so we add one to the number of vectors we are requesting.
8665 tp->irq_cnt = min_t(unsigned, cpus + 1, tp->irq_max);
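/* e.g. on a 4-CPU system with tp->irq_max >= 5 this requests five
 * vectors: one for link/error events plus one per rx ring.
 */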
8667 for (i = 0; i < tp->irq_max; i++) {
8668 msix_ent[i].entry = i;
8669 msix_ent[i].vector = 0;
8672 rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
8674 if (rc < TG3_RSS_MIN_NUM_MSIX_VECS)
8676 if (pci_enable_msix(tp->pdev, msix_ent, rc))
8679 "%s: Requested %d MSI-X vectors, received %d\n",
8680 tp->dev->name, tp->irq_cnt, rc);
8684 tp->tg3_flags3 |= TG3_FLG3_ENABLE_RSS;
8686 for (i = 0; i < tp->irq_max; i++)
8687 tp->napi[i].irq_vec = msix_ent[i].vector;
8689 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
8690 tp->tg3_flags3 |= TG3_FLG3_ENABLE_TSS;
8691 tp->dev->real_num_tx_queues = tp->irq_cnt - 1;
8693 tp->dev->real_num_tx_queues = 1;
8698 static void tg3_ints_init(struct tg3 *tp)
8700 if ((tp->tg3_flags & TG3_FLAG_SUPPORT_MSI_OR_MSIX) &&
8701 !(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
8702 /* All MSI supporting chips should support tagged
8703 * status. Assert that this is the case.
8705 printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
8706 "Not using MSI.\n", tp->dev->name);
8710 if ((tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX) && tg3_enable_msix(tp))
8711 tp->tg3_flags2 |= TG3_FLG2_USING_MSIX;
8712 else if ((tp->tg3_flags & TG3_FLAG_SUPPORT_MSI) &&
8713 pci_enable_msi(tp->pdev) == 0)
8714 tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
8716 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI_OR_MSIX) {
8717 u32 msi_mode = tr32(MSGINT_MODE);
8718 if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX)
8719 msi_mode |= MSGINT_MODE_MULTIVEC_EN;
8720 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
8723 if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSIX)) {
8725 tp->napi[0].irq_vec = tp->pdev->irq;
8726 tp->dev->real_num_tx_queues = 1;
8730 static void tg3_ints_fini(struct tg3 *tp)
8732 if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX)
8733 pci_disable_msix(tp->pdev);
8734 else if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
8735 pci_disable_msi(tp->pdev);
8736 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI_OR_MSIX;
8737 tp->tg3_flags3 &= ~TG3_FLG3_ENABLE_RSS;
8740 static int tg3_open(struct net_device *dev)
8742 struct tg3 *tp = netdev_priv(dev);
8745 if (tp->fw_needed) {
8746 err = tg3_request_firmware(tp);
8747 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
8751 printk(KERN_WARNING "%s: TSO capability disabled.\n",
8753 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
8754 } else if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
8755 printk(KERN_NOTICE "%s: TSO capability restored.\n",
8757 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
8761 netif_carrier_off(tp->dev);
8763 err = tg3_set_power_state(tp, PCI_D0);
8767 tg3_full_lock(tp, 0);
8769 tg3_disable_ints(tp);
8770 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
8772 tg3_full_unlock(tp);
8775 * Setup interrupts first so we know how
8776 * many NAPI resources to allocate
8780 /* The placement of this call is tied
8781 * to the setup and use of Host TX descriptors.
8783 err = tg3_alloc_consistent(tp);
8787 tg3_napi_enable(tp);
8789 for (i = 0; i < tp->irq_cnt; i++) {
8790 struct tg3_napi *tnapi = &tp->napi[i];
8791 err = tg3_request_irq(tp, i);
8793 for (i--; i >= 0; i--)
8794 free_irq(tnapi->irq_vec, tnapi);
8802 tg3_full_lock(tp, 0);
8804 err = tg3_init_hw(tp, 1);
8806 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8809 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
8810 tp->timer_offset = HZ;
8812 tp->timer_offset = HZ / 10;
8814 BUG_ON(tp->timer_offset > HZ);
8815 tp->timer_counter = tp->timer_multiplier =
8816 (HZ / tp->timer_offset);
8817 tp->asf_counter = tp->asf_multiplier =
8818 ((HZ / tp->timer_offset) * 2);
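/* Worked example (assuming HZ == 1000): with tagged status the timer
 * fires once per second (offset HZ, multiplier 1); without it the
 * timer fires every 100 ms (offset HZ / 10, multiplier 10), so the
 * once-per-second block in tg3_timer() still runs once a second, and
 * the ASF heartbeat counter, set to twice that, expires every two
 * seconds.
 */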
8820 init_timer(&tp->timer);
8821 tp->timer.expires = jiffies + tp->timer_offset;
8822 tp->timer.data = (unsigned long) tp;
8823 tp->timer.function = tg3_timer;
8826 tg3_full_unlock(tp);
8831 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8832 err = tg3_test_msi(tp);
8835 tg3_full_lock(tp, 0);
8836 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8838 tg3_full_unlock(tp);
8843 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
8844 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765 &&
8845 (tp->tg3_flags2 & TG3_FLG2_USING_MSI) &&
8846 (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)) {
8847 u32 val = tr32(PCIE_TRANSACTION_CFG);
8849 tw32(PCIE_TRANSACTION_CFG,
8850 val | PCIE_TRANS_CFG_1SHOT_MSI);
8856 tg3_full_lock(tp, 0);
8858 add_timer(&tp->timer);
8859 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
8860 tg3_enable_ints(tp);
8862 tg3_full_unlock(tp);
8864 netif_tx_start_all_queues(dev);
8869 for (i = tp->irq_cnt - 1; i >= 0; i--) {
8870 struct tg3_napi *tnapi = &tp->napi[i];
8871 free_irq(tnapi->irq_vec, tnapi);
8875 tg3_napi_disable(tp);
8876 tg3_free_consistent(tp);
8884 /*static*/ void tg3_dump_state(struct tg3 *tp)
8886 u32 val32, val32_2, val32_3, val32_4, val32_5;
8889 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
8891 pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
8892 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
8893 printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
8897 printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
8898 tr32(MAC_MODE), tr32(MAC_STATUS));
8899 printk(" MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
8900 tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
8901 printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
8902 tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
8903 printk(" MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
8904 tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
8906 /* Send data initiator control block */
8907 printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
8908 tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
8909 printk(" SNDDATAI_STATSCTRL[%08x]\n",
8910 tr32(SNDDATAI_STATSCTRL));
8912 /* Send data completion control block */
8913 printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
8915 /* Send BD ring selector block */
8916 printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
8917 tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
8919 /* Send BD initiator control block */
8920 printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
8921 tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
8923 /* Send BD completion control block */
8924 printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
8926 /* Receive list placement control block */
8927 printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
8928 tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
8929 printk(" RCVLPC_STATSCTRL[%08x]\n",
8930 tr32(RCVLPC_STATSCTRL));
8932 /* Receive data and receive BD initiator control block */
8933 printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
8934 tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
8936 /* Receive data completion control block */
8937 printk("DEBUG: RCVDCC_MODE[%08x]\n",
8940 /* Receive BD initiator control block */
8941 printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
8942 tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
8944 /* Receive BD completion control block */
8945 printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
8946 tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
8948 /* Receive list selector control block */
8949 printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
8950 tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
8952 /* Mbuf cluster free block */
8953 printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
8954 tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
8956 /* Host coalescing control block */
8957 printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
8958 tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
8959 printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
8960 tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
8961 tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
8962 printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
8963 tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
8964 tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
8965 printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
8966 tr32(HOSTCC_STATS_BLK_NIC_ADDR));
8967 printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
8968 tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
8970 /* Memory arbiter control block */
8971 printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
8972 tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
8974 /* Buffer manager control block */
8975 printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
8976 tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
8977 printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
8978 tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
8979 printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
8980 "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
8981 tr32(BUFMGR_DMA_DESC_POOL_ADDR),
8982 tr32(BUFMGR_DMA_DESC_POOL_SIZE));
8984 /* Read DMA control block */
8985 printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
8986 tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
8988 /* Write DMA control block */
8989 printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
8990 tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
8992 /* DMA completion block */
8993 printk("DEBUG: DMAC_MODE[%08x]\n",
8997 printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
8998 tr32(GRC_MODE), tr32(GRC_MISC_CFG));
8999 printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
9000 tr32(GRC_LOCAL_CTRL));
9003 printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
9004 tr32(RCVDBDI_JUMBO_BD + 0x0),
9005 tr32(RCVDBDI_JUMBO_BD + 0x4),
9006 tr32(RCVDBDI_JUMBO_BD + 0x8),
9007 tr32(RCVDBDI_JUMBO_BD + 0xc));
9008 printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
9009 tr32(RCVDBDI_STD_BD + 0x0),
9010 tr32(RCVDBDI_STD_BD + 0x4),
9011 tr32(RCVDBDI_STD_BD + 0x8),
9012 tr32(RCVDBDI_STD_BD + 0xc));
9013 printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
9014 tr32(RCVDBDI_MINI_BD + 0x0),
9015 tr32(RCVDBDI_MINI_BD + 0x4),
9016 tr32(RCVDBDI_MINI_BD + 0x8),
9017 tr32(RCVDBDI_MINI_BD + 0xc));
9019 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
9020 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
9021 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
9022 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
9023 printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
9024 val32, val32_2, val32_3, val32_4);
9026 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
9027 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
9028 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
9029 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
9030 printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
9031 val32, val32_2, val32_3, val32_4);
9033 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
9034 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
9035 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
9036 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
9037 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
9038 printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
9039 val32, val32_2, val32_3, val32_4, val32_5);
9041 /* SW status block */
9043 "Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
9046 sblk->rx_jumbo_consumer,
9048 sblk->rx_mini_consumer,
9049 sblk->idx[0].rx_producer,
9050 sblk->idx[0].tx_consumer);
9052 /* SW statistics block */
9053 printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
9054 ((u32 *)tp->hw_stats)[0],
9055 ((u32 *)tp->hw_stats)[1],
9056 ((u32 *)tp->hw_stats)[2],
9057 ((u32 *)tp->hw_stats)[3]);
9060 printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
9061 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
9062 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
9063 tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
9064 tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
9066 /* NIC side send descriptors. */
9067 for (i = 0; i < 6; i++) {
9070 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
9071 + (i * sizeof(struct tg3_tx_buffer_desc));
9072 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
9074 readl(txd + 0x0), readl(txd + 0x4),
9075 readl(txd + 0x8), readl(txd + 0xc));
9078 /* NIC side RX descriptors. */
9079 for (i = 0; i < 6; i++) {
9082 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
9083 + (i * sizeof(struct tg3_rx_buffer_desc));
9084 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
9086 readl(rxd + 0x0), readl(rxd + 0x4),
9087 readl(rxd + 0x8), readl(rxd + 0xc));
9088 rxd += (4 * sizeof(u32));
9089 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
9091 readl(rxd + 0x0), readl(rxd + 0x4),
9092 readl(rxd + 0x8), readl(rxd + 0xc));
9095 for (i = 0; i < 6; i++) {
9098 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
9099 + (i * sizeof(struct tg3_rx_buffer_desc));
9100 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
9102 readl(rxd + 0x0), readl(rxd + 0x4),
9103 readl(rxd + 0x8), readl(rxd + 0xc));
9104 rxd += (4 * sizeof(u32));
9105 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
9107 readl(rxd + 0x0), readl(rxd + 0x4),
9108 readl(rxd + 0x8), readl(rxd + 0xc));
9113 static struct net_device_stats *tg3_get_stats(struct net_device *);
9114 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
9116 static int tg3_close(struct net_device *dev)
9119 struct tg3 *tp = netdev_priv(dev);
9121 tg3_napi_disable(tp);
9122 cancel_work_sync(&tp->reset_task);
9124 netif_tx_stop_all_queues(dev);
9126 del_timer_sync(&tp->timer);
9130 tg3_full_lock(tp, 1);
9135 tg3_disable_ints(tp);
9137 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9139 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
9141 tg3_full_unlock(tp);
9143 for (i = tp->irq_cnt - 1; i >= 0; i--) {
9144 struct tg3_napi *tnapi = &tp->napi[i];
9145 free_irq(tnapi->irq_vec, tnapi);
9150 memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
9151 sizeof(tp->net_stats_prev));
9152 memcpy(&tp->estats_prev, tg3_get_estats(tp),
9153 sizeof(tp->estats_prev));
9155 tg3_free_consistent(tp);
9157 tg3_set_power_state(tp, PCI_D3hot);
9159 netif_carrier_off(tp->dev);
9164 static inline unsigned long get_stat64(tg3_stat64_t *val)
9168 #if (BITS_PER_LONG == 32)
9171 ret = ((u64)val->high << 32) | ((u64)val->low);
9176 static inline u64 get_estat64(tg3_stat64_t *val)
9178 return ((u64)val->high << 32) | ((u64)val->low);
9181 static unsigned long calc_crc_errors(struct tg3 *tp)
9183 struct tg3_hw_stats *hw_stats = tp->hw_stats;
9185 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
9186 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9187 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
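/* On 5700/5701 copper parts the CRC error count is taken from the PHY
 * rather than the MAC statistics block: setting MII_TG3_TEST1_CRC_EN
 * makes PHY register 0x14 report accumulated CRC errors, and the value
 * read below is added to tp->phy_crc_errors (the register appears to
 * be clear-on-read, since each sample is accumulated rather than
 * stored).
 */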
9190 spin_lock_bh(&tp->lock);
9191 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
9192 tg3_writephy(tp, MII_TG3_TEST1,
9193 val | MII_TG3_TEST1_CRC_EN);
9194 tg3_readphy(tp, 0x14, &val);
9197 spin_unlock_bh(&tp->lock);
9199 tp->phy_crc_errors += val;
9201 return tp->phy_crc_errors;
9204 return get_stat64(&hw_stats->rx_fcs_errors);
9207 #define ESTAT_ADD(member) \
9208 estats->member = old_estats->member + \
9209 get_estat64(&hw_stats->member)
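/* Ethtool statistics are reported as the snapshot saved at the last
 * tg3_close() (tp->estats_prev) plus the live hardware counters, so the
 * totals presumably survive the counter reset implied by halting and
 * restarting the chip across an ifdown/ifup cycle.
 */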
9211 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
9213 struct tg3_ethtool_stats *estats = &tp->estats;
9214 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
9215 struct tg3_hw_stats *hw_stats = tp->hw_stats;
9220 ESTAT_ADD(rx_octets);
9221 ESTAT_ADD(rx_fragments);
9222 ESTAT_ADD(rx_ucast_packets);
9223 ESTAT_ADD(rx_mcast_packets);
9224 ESTAT_ADD(rx_bcast_packets);
9225 ESTAT_ADD(rx_fcs_errors);
9226 ESTAT_ADD(rx_align_errors);
9227 ESTAT_ADD(rx_xon_pause_rcvd);
9228 ESTAT_ADD(rx_xoff_pause_rcvd);
9229 ESTAT_ADD(rx_mac_ctrl_rcvd);
9230 ESTAT_ADD(rx_xoff_entered);
9231 ESTAT_ADD(rx_frame_too_long_errors);
9232 ESTAT_ADD(rx_jabbers);
9233 ESTAT_ADD(rx_undersize_packets);
9234 ESTAT_ADD(rx_in_length_errors);
9235 ESTAT_ADD(rx_out_length_errors);
9236 ESTAT_ADD(rx_64_or_less_octet_packets);
9237 ESTAT_ADD(rx_65_to_127_octet_packets);
9238 ESTAT_ADD(rx_128_to_255_octet_packets);
9239 ESTAT_ADD(rx_256_to_511_octet_packets);
9240 ESTAT_ADD(rx_512_to_1023_octet_packets);
9241 ESTAT_ADD(rx_1024_to_1522_octet_packets);
9242 ESTAT_ADD(rx_1523_to_2047_octet_packets);
9243 ESTAT_ADD(rx_2048_to_4095_octet_packets);
9244 ESTAT_ADD(rx_4096_to_8191_octet_packets);
9245 ESTAT_ADD(rx_8192_to_9022_octet_packets);
9247 ESTAT_ADD(tx_octets);
9248 ESTAT_ADD(tx_collisions);
9249 ESTAT_ADD(tx_xon_sent);
9250 ESTAT_ADD(tx_xoff_sent);
9251 ESTAT_ADD(tx_flow_control);
9252 ESTAT_ADD(tx_mac_errors);
9253 ESTAT_ADD(tx_single_collisions);
9254 ESTAT_ADD(tx_mult_collisions);
9255 ESTAT_ADD(tx_deferred);
9256 ESTAT_ADD(tx_excessive_collisions);
9257 ESTAT_ADD(tx_late_collisions);
9258 ESTAT_ADD(tx_collide_2times);
9259 ESTAT_ADD(tx_collide_3times);
9260 ESTAT_ADD(tx_collide_4times);
9261 ESTAT_ADD(tx_collide_5times);
9262 ESTAT_ADD(tx_collide_6times);
9263 ESTAT_ADD(tx_collide_7times);
9264 ESTAT_ADD(tx_collide_8times);
9265 ESTAT_ADD(tx_collide_9times);
9266 ESTAT_ADD(tx_collide_10times);
9267 ESTAT_ADD(tx_collide_11times);
9268 ESTAT_ADD(tx_collide_12times);
9269 ESTAT_ADD(tx_collide_13times);
9270 ESTAT_ADD(tx_collide_14times);
9271 ESTAT_ADD(tx_collide_15times);
9272 ESTAT_ADD(tx_ucast_packets);
9273 ESTAT_ADD(tx_mcast_packets);
9274 ESTAT_ADD(tx_bcast_packets);
9275 ESTAT_ADD(tx_carrier_sense_errors);
9276 ESTAT_ADD(tx_discards);
9277 ESTAT_ADD(tx_errors);
9279 ESTAT_ADD(dma_writeq_full);
9280 ESTAT_ADD(dma_write_prioq_full);
9281 ESTAT_ADD(rxbds_empty);
9282 ESTAT_ADD(rx_discards);
9283 ESTAT_ADD(rx_errors);
9284 ESTAT_ADD(rx_threshold_hit);
9286 ESTAT_ADD(dma_readq_full);
9287 ESTAT_ADD(dma_read_prioq_full);
9288 ESTAT_ADD(tx_comp_queue_full);
9290 ESTAT_ADD(ring_set_send_prod_index);
9291 ESTAT_ADD(ring_status_update);
9292 ESTAT_ADD(nic_irqs);
9293 ESTAT_ADD(nic_avoided_irqs);
9294 ESTAT_ADD(nic_tx_threshold_hit);
9299 static struct net_device_stats *tg3_get_stats(struct net_device *dev)
9301 struct tg3 *tp = netdev_priv(dev);
9302 struct net_device_stats *stats = &tp->net_stats;
9303 struct net_device_stats *old_stats = &tp->net_stats_prev;
9304 struct tg3_hw_stats *hw_stats = tp->hw_stats;
9309 stats->rx_packets = old_stats->rx_packets +
9310 get_stat64(&hw_stats->rx_ucast_packets) +
9311 get_stat64(&hw_stats->rx_mcast_packets) +
9312 get_stat64(&hw_stats->rx_bcast_packets);
9314 stats->tx_packets = old_stats->tx_packets +
9315 get_stat64(&hw_stats->tx_ucast_packets) +
9316 get_stat64(&hw_stats->tx_mcast_packets) +
9317 get_stat64(&hw_stats->tx_bcast_packets);
9319 stats->rx_bytes = old_stats->rx_bytes +
9320 get_stat64(&hw_stats->rx_octets);
9321 stats->tx_bytes = old_stats->tx_bytes +
9322 get_stat64(&hw_stats->tx_octets);
9324 stats->rx_errors = old_stats->rx_errors +
9325 get_stat64(&hw_stats->rx_errors);
9326 stats->tx_errors = old_stats->tx_errors +
9327 get_stat64(&hw_stats->tx_errors) +
9328 get_stat64(&hw_stats->tx_mac_errors) +
9329 get_stat64(&hw_stats->tx_carrier_sense_errors) +
9330 get_stat64(&hw_stats->tx_discards);
9332 stats->multicast = old_stats->multicast +
9333 get_stat64(&hw_stats->rx_mcast_packets);
9334 stats->collisions = old_stats->collisions +
9335 get_stat64(&hw_stats->tx_collisions);
9337 stats->rx_length_errors = old_stats->rx_length_errors +
9338 get_stat64(&hw_stats->rx_frame_too_long_errors) +
9339 get_stat64(&hw_stats->rx_undersize_packets);
9341 stats->rx_over_errors = old_stats->rx_over_errors +
9342 get_stat64(&hw_stats->rxbds_empty);
9343 stats->rx_frame_errors = old_stats->rx_frame_errors +
9344 get_stat64(&hw_stats->rx_align_errors);
9345 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
9346 get_stat64(&hw_stats->tx_discards);
9347 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
9348 get_stat64(&hw_stats->tx_carrier_sense_errors);
9350 stats->rx_crc_errors = old_stats->rx_crc_errors +
9351 calc_crc_errors(tp);
9353 stats->rx_missed_errors = old_stats->rx_missed_errors +
9354 get_stat64(&hw_stats->rx_discards);
9359 static inline u32 calc_crc(unsigned char *buf, int len)
9367 for (j = 0; j < len; j++) {
9370 for (k = 0; k < 8; k++) {
9384 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9386 /* accept or reject all multicast frames */
9387 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9388 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9389 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9390 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9393 static void __tg3_set_rx_mode(struct net_device *dev)
9395 struct tg3 *tp = netdev_priv(dev);
9398 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9399 RX_MODE_KEEP_VLAN_TAG);
9401 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9404 #if TG3_VLAN_TAG_USED
9406 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
9407 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9409 /* By definition, VLAN is disabled always in this
9412 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
9413 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9416 if (dev->flags & IFF_PROMISC) {
9417 /* Promiscuous mode. */
9418 rx_mode |= RX_MODE_PROMISC;
9419 } else if (dev->flags & IFF_ALLMULTI) {
9420 /* Accept all multicast. */
9421 tg3_set_multi(tp, 1);
9422 } else if (dev->mc_count < 1) {
9423 /* Reject all multicast. */
9424 tg3_set_multi(tp, 0);
9426 /* Accept one or more multicast(s). */
9427 struct dev_mc_list *mclist;
9429 u32 mc_filter[4] = { 0, };
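/* Standard 128-bit multicast hash filter: each address is run through
 * calc_crc() and reduced (on lines not shown here) to a 7-bit index;
 * the top two bits of that index select one of the four 32-bit
 * MAC_HASH_REG_0..3 registers via regidx, and the low five bits select
 * the bit within that register, e.g. index 0x65 sets bit 5 of
 * MAC_HASH_REG_3.
 */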
9434 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
9435 i++, mclist = mclist->next) {
9437 crc = calc_crc(mclist->dmi_addr, ETH_ALEN);
9439 regidx = (bit & 0x60) >> 5;
9441 mc_filter[regidx] |= (1 << bit);
9444 tw32(MAC_HASH_REG_0, mc_filter[0]);
9445 tw32(MAC_HASH_REG_1, mc_filter[1]);
9446 tw32(MAC_HASH_REG_2, mc_filter[2]);
9447 tw32(MAC_HASH_REG_3, mc_filter[3]);
9450 if (rx_mode != tp->rx_mode) {
9451 tp->rx_mode = rx_mode;
9452 tw32_f(MAC_RX_MODE, rx_mode);
9457 static void tg3_set_rx_mode(struct net_device *dev)
9459 struct tg3 *tp = netdev_priv(dev);
9461 if (!netif_running(dev))
9464 tg3_full_lock(tp, 0);
9465 __tg3_set_rx_mode(dev);
9466 tg3_full_unlock(tp);
9469 #define TG3_REGDUMP_LEN (32 * 1024)
9471 static int tg3_get_regs_len(struct net_device *dev)
9473 return TG3_REGDUMP_LEN;
9476 static void tg3_get_regs(struct net_device *dev,
9477 struct ethtool_regs *regs, void *_p)
9480 struct tg3 *tp = netdev_priv(dev);
9486 memset(p, 0, TG3_REGDUMP_LEN);
9488 if (tp->link_config.phy_is_low_power)
9491 tg3_full_lock(tp, 0);
9493 #define __GET_REG32(reg) (*(p)++ = tr32(reg))
9494 #define GET_REG32_LOOP(base,len) \
9495 do { p = (u32 *)(orig_p + (base)); \
9496 for (i = 0; i < len; i += 4) \
9497 __GET_REG32((base) + i); \
9499 #define GET_REG32_1(reg) \
9500 do { p = (u32 *)(orig_p + (reg)); \
9501 __GET_REG32((reg)); \
9504 GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
9505 GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
9506 GET_REG32_LOOP(MAC_MODE, 0x4f0);
9507 GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
9508 GET_REG32_1(SNDDATAC_MODE);
9509 GET_REG32_LOOP(SNDBDS_MODE, 0x80);
9510 GET_REG32_LOOP(SNDBDI_MODE, 0x48);
9511 GET_REG32_1(SNDBDC_MODE);
9512 GET_REG32_LOOP(RCVLPC_MODE, 0x20);
9513 GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
9514 GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
9515 GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
9516 GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
9517 GET_REG32_1(RCVDCC_MODE);
9518 GET_REG32_LOOP(RCVBDI_MODE, 0x20);
9519 GET_REG32_LOOP(RCVCC_MODE, 0x14);
9520 GET_REG32_LOOP(RCVLSC_MODE, 0x08);
9521 GET_REG32_1(MBFREE_MODE);
9522 GET_REG32_LOOP(HOSTCC_MODE, 0x100);
9523 GET_REG32_LOOP(MEMARB_MODE, 0x10);
9524 GET_REG32_LOOP(BUFMGR_MODE, 0x58);
9525 GET_REG32_LOOP(RDMAC_MODE, 0x08);
9526 GET_REG32_LOOP(WDMAC_MODE, 0x08);
9527 GET_REG32_1(RX_CPU_MODE);
9528 GET_REG32_1(RX_CPU_STATE);
9529 GET_REG32_1(RX_CPU_PGMCTR);
9530 GET_REG32_1(RX_CPU_HWBKPT);
9531 GET_REG32_1(TX_CPU_MODE);
9532 GET_REG32_1(TX_CPU_STATE);
9533 GET_REG32_1(TX_CPU_PGMCTR);
9534 GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
9535 GET_REG32_LOOP(FTQ_RESET, 0x120);
9536 GET_REG32_LOOP(MSGINT_MODE, 0x0c);
9537 GET_REG32_1(DMAC_MODE);
9538 GET_REG32_LOOP(GRC_MODE, 0x4c);
9539 if (tp->tg3_flags & TG3_FLAG_NVRAM)
9540 GET_REG32_LOOP(NVRAM_CMD, 0x24);
9543 #undef GET_REG32_LOOP
9546 tg3_full_unlock(tp);
9549 static int tg3_get_eeprom_len(struct net_device *dev)
9551 struct tg3 *tp = netdev_priv(dev);
9553 return tp->nvram_size;
9556 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
9558 struct tg3 *tp = netdev_priv(dev);
9561 u32 i, offset, len, b_offset, b_count;
9564 if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM)
9567 if (tp->link_config.phy_is_low_power)
9570 offset = eeprom->offset;
9574 eeprom->magic = TG3_EEPROM_MAGIC;
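/* NVRAM is read in aligned 32-bit words, so the request is served in
 * up to three phases: a partial word covering an unaligned head, whole
 * words for the aligned middle, and a partial word for any unaligned
 * tail.  E.g. offset=1 len=2 reads the word at offset 0 and copies two
 * bytes out of it starting at byte 1.
 */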
9577 /* adjustments to start on required 4 byte boundary */
9578 b_offset = offset & 3;
9579 b_count = 4 - b_offset;
9580 if (b_count > len) {
9581 /* i.e. offset=1 len=2 */
9584 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
9587 memcpy(data, ((char*)&val) + b_offset, b_count);
9590 eeprom->len += b_count;
9593 /* read bytes up to the last 4 byte boundary */
9594 pd = &data[eeprom->len];
9595 for (i = 0; i < (len - (len & 3)); i += 4) {
9596 ret = tg3_nvram_read_be32(tp, offset + i, &val);
9601 memcpy(pd + i, &val, 4);
9606 /* read last bytes not ending on 4 byte boundary */
9607 pd = &data[eeprom->len];
9609 b_offset = offset + len - b_count;
9610 ret = tg3_nvram_read_be32(tp, b_offset, &val);
9613 memcpy(pd, &val, b_count);
9614 eeprom->len += b_count;
9619 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
9621 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
9623 struct tg3 *tp = netdev_priv(dev);
9625 u32 offset, len, b_offset, odd_len;
9629 if (tp->link_config.phy_is_low_power)
9632 if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) ||
9633 eeprom->magic != TG3_EEPROM_MAGIC)
9636 offset = eeprom->offset;
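/* NVRAM is written in whole 32-bit words, so unaligned requests are
 * handled read-modify-write style: the words containing an unaligned
 * start and an unaligned end are read back first ('start' and 'end'
 * below) and merged around the caller's data in a temporary buffer
 * before tg3_nvram_write_block() is called.
 */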
9639 if ((b_offset = (offset & 3))) {
9640 /* adjustments to start on required 4 byte boundary */
9641 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
9652 /* adjustments to end on required 4 byte boundary */
9654 len = (len + 3) & ~3;
9655 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
9661 if (b_offset || odd_len) {
9662 buf = kmalloc(len, GFP_KERNEL);
9666 memcpy(buf, &start, 4);
9668 memcpy(buf+len-4, &end, 4);
9669 memcpy(buf + b_offset, data, eeprom->len);
9672 ret = tg3_nvram_write_block(tp, offset, len, buf);
9680 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9682 struct tg3 *tp = netdev_priv(dev);
9684 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9685 struct phy_device *phydev;
9686 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
9688 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
9689 return phy_ethtool_gset(phydev, cmd);
9692 cmd->supported = (SUPPORTED_Autoneg);
9694 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9695 cmd->supported |= (SUPPORTED_1000baseT_Half |
9696 SUPPORTED_1000baseT_Full);
9698 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
9699 cmd->supported |= (SUPPORTED_100baseT_Half |
9700 SUPPORTED_100baseT_Full |
9701 SUPPORTED_10baseT_Half |
9702 SUPPORTED_10baseT_Full |
9704 cmd->port = PORT_TP;
9706 cmd->supported |= SUPPORTED_FIBRE;
9707 cmd->port = PORT_FIBRE;
9710 cmd->advertising = tp->link_config.advertising;
9711 if (netif_running(dev)) {
9712 cmd->speed = tp->link_config.active_speed;
9713 cmd->duplex = tp->link_config.active_duplex;
9715 cmd->phy_address = tp->phy_addr;
9716 cmd->transceiver = XCVR_INTERNAL;
9717 cmd->autoneg = tp->link_config.autoneg;
9723 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9725 struct tg3 *tp = netdev_priv(dev);
9727 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9728 struct phy_device *phydev;
9729 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
9731 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
9732 return phy_ethtool_sset(phydev, cmd);
9735 if (cmd->autoneg != AUTONEG_ENABLE &&
9736 cmd->autoneg != AUTONEG_DISABLE)
9739 if (cmd->autoneg == AUTONEG_DISABLE &&
9740 cmd->duplex != DUPLEX_FULL &&
9741 cmd->duplex != DUPLEX_HALF)
9744 if (cmd->autoneg == AUTONEG_ENABLE) {
9745 u32 mask = ADVERTISED_Autoneg |
9747 ADVERTISED_Asym_Pause;
9749 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9750 mask |= ADVERTISED_1000baseT_Half |
9751 ADVERTISED_1000baseT_Full;
9753 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
9754 mask |= ADVERTISED_100baseT_Half |
9755 ADVERTISED_100baseT_Full |
9756 ADVERTISED_10baseT_Half |
9757 ADVERTISED_10baseT_Full |
9760 mask |= ADVERTISED_FIBRE;
9762 if (cmd->advertising & ~mask)
9765 mask &= (ADVERTISED_1000baseT_Half |
9766 ADVERTISED_1000baseT_Full |
9767 ADVERTISED_100baseT_Half |
9768 ADVERTISED_100baseT_Full |
9769 ADVERTISED_10baseT_Half |
9770 ADVERTISED_10baseT_Full);
9772 cmd->advertising &= mask;
9774 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
9775 if (cmd->speed != SPEED_1000)
9778 if (cmd->duplex != DUPLEX_FULL)
9781 if (cmd->speed != SPEED_100 &&
9782 cmd->speed != SPEED_10)
9787 tg3_full_lock(tp, 0);
9789 tp->link_config.autoneg = cmd->autoneg;
9790 if (cmd->autoneg == AUTONEG_ENABLE) {
9791 tp->link_config.advertising = (cmd->advertising |
9792 ADVERTISED_Autoneg);
9793 tp->link_config.speed = SPEED_INVALID;
9794 tp->link_config.duplex = DUPLEX_INVALID;
9796 tp->link_config.advertising = 0;
9797 tp->link_config.speed = cmd->speed;
9798 tp->link_config.duplex = cmd->duplex;
9801 tp->link_config.orig_speed = tp->link_config.speed;
9802 tp->link_config.orig_duplex = tp->link_config.duplex;
9803 tp->link_config.orig_autoneg = tp->link_config.autoneg;
9805 if (netif_running(dev))
9806 tg3_setup_phy(tp, 1);
9808 tg3_full_unlock(tp);
9813 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
9815 struct tg3 *tp = netdev_priv(dev);
9817 strcpy(info->driver, DRV_MODULE_NAME);
9818 strcpy(info->version, DRV_MODULE_VERSION);
9819 strcpy(info->fw_version, tp->fw_ver);
9820 strcpy(info->bus_info, pci_name(tp->pdev));
9823 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9825 struct tg3 *tp = netdev_priv(dev);
9827 if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
9828 device_can_wakeup(&tp->pdev->dev))
9829 wol->supported = WAKE_MAGIC;
9833 if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
9834 device_can_wakeup(&tp->pdev->dev))
9835 wol->wolopts = WAKE_MAGIC;
9836 memset(&wol->sopass, 0, sizeof(wol->sopass));
9839 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9841 struct tg3 *tp = netdev_priv(dev);
9842 struct device *dp = &tp->pdev->dev;
9844 if (wol->wolopts & ~WAKE_MAGIC)
9846 if ((wol->wolopts & WAKE_MAGIC) &&
9847 !((tp->tg3_flags & TG3_FLAG_WOL_CAP) && device_can_wakeup(dp)))
9850 spin_lock_bh(&tp->lock);
9851 if (wol->wolopts & WAKE_MAGIC) {
9852 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
9853 device_set_wakeup_enable(dp, true);
9855 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
9856 device_set_wakeup_enable(dp, false);
9858 spin_unlock_bh(&tp->lock);
9863 static u32 tg3_get_msglevel(struct net_device *dev)
9865 struct tg3 *tp = netdev_priv(dev);
9866 return tp->msg_enable;
9869 static void tg3_set_msglevel(struct net_device *dev, u32 value)
9871 struct tg3 *tp = netdev_priv(dev);
9872 tp->msg_enable = value;
9875 static int tg3_set_tso(struct net_device *dev, u32 value)
9877 struct tg3 *tp = netdev_priv(dev);
9879 if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
9884 if ((dev->features & NETIF_F_IPV6_CSUM) &&
9885 ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) ||
9886 (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3))) {
9888 dev->features |= NETIF_F_TSO6;
9889 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) ||
9890 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9891 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
9892 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
9893 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
9894 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
9895 dev->features |= NETIF_F_TSO_ECN;
9897 dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN);
9899 return ethtool_op_set_tso(dev, value);
9902 static int tg3_nway_reset(struct net_device *dev)
9904 struct tg3 *tp = netdev_priv(dev);
9907 if (!netif_running(dev))
9910 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
9913 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9914 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
9916 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
9920 spin_lock_bh(&tp->lock);
9922 tg3_readphy(tp, MII_BMCR, &bmcr);
9923 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
9924 ((bmcr & BMCR_ANENABLE) ||
9925 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
9926 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
9930 spin_unlock_bh(&tp->lock);
9936 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
9938 struct tg3 *tp = netdev_priv(dev);
9940 ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
9941 ering->rx_mini_max_pending = 0;
9942 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
9943 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
9945 ering->rx_jumbo_max_pending = 0;
9947 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
9949 ering->rx_pending = tp->rx_pending;
9950 ering->rx_mini_pending = 0;
9951 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
9952 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
9954 ering->rx_jumbo_pending = 0;
9956 ering->tx_pending = tp->napi[0].tx_pending;
9959 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
9961 struct tg3 *tp = netdev_priv(dev);
9962 int i, irq_sync = 0, err = 0;
9964 if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
9965 (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
9966 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
9967 (ering->tx_pending <= MAX_SKB_FRAGS) ||
9968 ((tp->tg3_flags2 & TG3_FLG2_TSO_BUG) &&
9969 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
9972 if (netif_running(dev)) {
9978 tg3_full_lock(tp, irq_sync);
9980 tp->rx_pending = ering->rx_pending;
9982 if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
9983 tp->rx_pending > 63)
9984 tp->rx_pending = 63;
9985 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
9987 for (i = 0; i < TG3_IRQ_MAX_VECS; i++)
9988 tp->napi[i].tx_pending = ering->tx_pending;
9990 if (netif_running(dev)) {
9991 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9992 err = tg3_restart_hw(tp, 1);
9994 tg3_netif_start(tp);
9997 tg3_full_unlock(tp);
9999 if (irq_sync && !err)
10005 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10007 struct tg3 *tp = netdev_priv(dev);
10009 epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
10011 if (tp->link_config.active_flowctrl & FLOW_CTRL_RX)
10012 epause->rx_pause = 1;
10014 epause->rx_pause = 0;
10016 if (tp->link_config.active_flowctrl & FLOW_CTRL_TX)
10017 epause->tx_pause = 1;
10019 epause->tx_pause = 0;
10022 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10024 struct tg3 *tp = netdev_priv(dev);
10027 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
10028 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
10031 if (epause->autoneg) {
10033 struct phy_device *phydev;
10035 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10037 if (epause->rx_pause) {
10038 if (epause->tx_pause)
10039 newadv = ADVERTISED_Pause;
10041 newadv = ADVERTISED_Pause |
10042 ADVERTISED_Asym_Pause;
10043 } else if (epause->tx_pause) {
10044 newadv = ADVERTISED_Asym_Pause;
10048 if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
10049 u32 oldadv = phydev->advertising &
10050 (ADVERTISED_Pause |
10051 ADVERTISED_Asym_Pause);
10052 if (oldadv != newadv) {
10053 phydev->advertising &=
10054 ~(ADVERTISED_Pause |
10055 ADVERTISED_Asym_Pause);
10056 phydev->advertising |= newadv;
10057 err = phy_start_aneg(phydev);
10060 tp->link_config.advertising &=
10061 ~(ADVERTISED_Pause |
10062 ADVERTISED_Asym_Pause);
10063 tp->link_config.advertising |= newadv;
10066 if (epause->rx_pause)
10067 tp->link_config.flowctrl |= FLOW_CTRL_RX;
10069 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
10071 if (epause->tx_pause)
10072 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10074 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
10076 if (netif_running(dev))
10077 tg3_setup_flow_control(tp, 0, 0);
10082 if (netif_running(dev)) {
10083 tg3_netif_stop(tp);
10087 tg3_full_lock(tp, irq_sync);
10089 if (epause->autoneg)
10090 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
10092 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
10093 if (epause->rx_pause)
10094 tp->link_config.flowctrl |= FLOW_CTRL_RX;
10096 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
10097 if (epause->tx_pause)
10098 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10100 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
10102 if (netif_running(dev)) {
10103 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10104 err = tg3_restart_hw(tp, 1);
10106 tg3_netif_start(tp);
10109 tg3_full_unlock(tp);
10115 static u32 tg3_get_rx_csum(struct net_device *dev)
10117 struct tg3 *tp = netdev_priv(dev);
10118 return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
10121 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
10123 struct tg3 *tp = netdev_priv(dev);
10125 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
10131 spin_lock_bh(&tp->lock);
10133 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
10135 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
10136 spin_unlock_bh(&tp->lock);
10141 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
10143 struct tg3 *tp = netdev_priv(dev);
10145 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
10151 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
10152 ethtool_op_set_tx_ipv6_csum(dev, data);
10154 ethtool_op_set_tx_csum(dev, data);
10159 static int tg3_get_sset_count(struct net_device *dev, int sset)
10163 return TG3_NUM_TEST;
10165 return TG3_NUM_STATS;
10167 return -EOPNOTSUPP;
10171 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10173 switch (stringset) {
10175 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
10178 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
10181 WARN_ON(1); /* we need a WARN() */
10186 static int tg3_phys_id(struct net_device *dev, u32 data)
10188 struct tg3 *tp = netdev_priv(dev);
10191 if (!netif_running(tp->dev))
10195 data = UINT_MAX / 2;
10197 for (i = 0; i < (data * 2); i++) {
10199 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10200 LED_CTRL_1000MBPS_ON |
10201 LED_CTRL_100MBPS_ON |
10202 LED_CTRL_10MBPS_ON |
10203 LED_CTRL_TRAFFIC_OVERRIDE |
10204 LED_CTRL_TRAFFIC_BLINK |
10205 LED_CTRL_TRAFFIC_LED);
10208 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10209 LED_CTRL_TRAFFIC_OVERRIDE);
10211 if (msleep_interruptible(500))
10214 tw32(MAC_LED_CTRL, tp->led_ctrl);
10218 static void tg3_get_ethtool_stats(struct net_device *dev,
10219 struct ethtool_stats *estats, u64 *tmp_stats)
10221 struct tg3 *tp = netdev_priv(dev);
10222 memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
10225 #define NVRAM_TEST_SIZE 0x100
10226 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
10227 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
10228 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
10229 #define NVRAM_SELFBOOT_HW_SIZE 0x20
10230 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
10232 static int tg3_test_nvram(struct tg3 *tp)
10236 int i, j, k, err = 0, size;
10238 if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM)
10241 if (tg3_nvram_read(tp, 0, &magic) != 0)
10244 if (magic == TG3_EEPROM_MAGIC)
10245 size = NVRAM_TEST_SIZE;
10246 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
10247 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
10248 TG3_EEPROM_SB_FORMAT_1) {
10249 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
10250 case TG3_EEPROM_SB_REVISION_0:
10251 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
10253 case TG3_EEPROM_SB_REVISION_2:
10254 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
10256 case TG3_EEPROM_SB_REVISION_3:
10257 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
10264 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
10265 size = NVRAM_SELFBOOT_HW_SIZE;
10269 buf = kmalloc(size, GFP_KERNEL);
10274 for (i = 0, j = 0; i < size; i += 4, j++) {
10275 err = tg3_nvram_read_be32(tp, i, &buf[j]);
10282 /* Selfboot format */
10283 magic = be32_to_cpu(buf[0]);
10284 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
10285 TG3_EEPROM_MAGIC_FW) {
10286 u8 *buf8 = (u8 *) buf, csum8 = 0;
10288 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
10289 TG3_EEPROM_SB_REVISION_2) {
10290 /* For rev 2, the csum doesn't include the MBA. */
10291 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
10293 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
10296 for (i = 0; i < size; i++)
10309 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
10310 TG3_EEPROM_MAGIC_HW) {
10311 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
10312 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
10313 u8 *buf8 = (u8 *) buf;
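/* The self-boot HW-format image interleaves parity bits with the data:
 * bytes 0, 8 and 16 appear to carry the parity bits for the surrounding
 * data bytes.  Once the two arrays are separated below, each data byte
 * is checked for odd parity, i.e. popcount(data[i]) plus its parity bit
 * must be odd or the test fails.
 */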
10315 /* Separate the parity bits and the data bytes. */
10316 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
10317 if ((i == 0) || (i == 8)) {
10321 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
10322 parity[k++] = buf8[i] & msk;
10325 else if (i == 16) {
10329 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
10330 parity[k++] = buf8[i] & msk;
10333 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
10334 parity[k++] = buf8[i] & msk;
10337 data[j++] = buf8[i];
10341 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
10342 u8 hw8 = hweight8(data[i]);
10344 if ((hw8 & 0x1) && parity[i])
10346 else if (!(hw8 & 0x1) && !parity[i])
10353 /* Bootstrap checksum at offset 0x10 */
10354 csum = calc_crc((unsigned char *) buf, 0x10);
10355 if (csum != be32_to_cpu(buf[0x10/4]))
10358 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
10359 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
10360 if (csum != be32_to_cpu(buf[0xfc/4]))
10370 #define TG3_SERDES_TIMEOUT_SEC 2
10371 #define TG3_COPPER_TIMEOUT_SEC 6
10373 static int tg3_test_link(struct tg3 *tp)
10377 if (!netif_running(tp->dev))
10380 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
10381 max = TG3_SERDES_TIMEOUT_SEC;
10383 max = TG3_COPPER_TIMEOUT_SEC;
10385 for (i = 0; i < max; i++) {
10386 if (netif_carrier_ok(tp->dev))
10389 if (msleep_interruptible(1000))
10396 /* Only test the commonly used registers */
10397 static int tg3_test_registers(struct tg3 *tp)
10399 int i, is_5705, is_5750;
10400 u32 offset, read_mask, write_mask, val, save_val, read_val;
10404 #define TG3_FL_5705 0x1
10405 #define TG3_FL_NOT_5705 0x2
10406 #define TG3_FL_NOT_5788 0x4
10407 #define TG3_FL_NOT_5750 0x8
10411 /* MAC Control Registers */
10412 { MAC_MODE, TG3_FL_NOT_5705,
10413 0x00000000, 0x00ef6f8c },
10414 { MAC_MODE, TG3_FL_5705,
10415 0x00000000, 0x01ef6b8c },
10416 { MAC_STATUS, TG3_FL_NOT_5705,
10417 0x03800107, 0x00000000 },
10418 { MAC_STATUS, TG3_FL_5705,
10419 0x03800100, 0x00000000 },
10420 { MAC_ADDR_0_HIGH, 0x0000,
10421 0x00000000, 0x0000ffff },
10422 { MAC_ADDR_0_LOW, 0x0000,
10423 0x00000000, 0xffffffff },
10424 { MAC_RX_MTU_SIZE, 0x0000,
10425 0x00000000, 0x0000ffff },
10426 { MAC_TX_MODE, 0x0000,
10427 0x00000000, 0x00000070 },
10428 { MAC_TX_LENGTHS, 0x0000,
10429 0x00000000, 0x00003fff },
10430 { MAC_RX_MODE, TG3_FL_NOT_5705,
10431 0x00000000, 0x000007fc },
10432 { MAC_RX_MODE, TG3_FL_5705,
10433 0x00000000, 0x000007dc },
10434 { MAC_HASH_REG_0, 0x0000,
10435 0x00000000, 0xffffffff },
10436 { MAC_HASH_REG_1, 0x0000,
10437 0x00000000, 0xffffffff },
10438 { MAC_HASH_REG_2, 0x0000,
10439 0x00000000, 0xffffffff },
10440 { MAC_HASH_REG_3, 0x0000,
10441 0x00000000, 0xffffffff },
10443 /* Receive Data and Receive BD Initiator Control Registers. */
10444 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
10445 0x00000000, 0xffffffff },
10446 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
10447 0x00000000, 0xffffffff },
10448 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
10449 0x00000000, 0x00000003 },
10450 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
10451 0x00000000, 0xffffffff },
10452 { RCVDBDI_STD_BD+0, 0x0000,
10453 0x00000000, 0xffffffff },
10454 { RCVDBDI_STD_BD+4, 0x0000,
10455 0x00000000, 0xffffffff },
10456 { RCVDBDI_STD_BD+8, 0x0000,
10457 0x00000000, 0xffff0002 },
10458 { RCVDBDI_STD_BD+0xc, 0x0000,
10459 0x00000000, 0xffffffff },
10461 /* Receive BD Initiator Control Registers. */
10462 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
10463 0x00000000, 0xffffffff },
10464 { RCVBDI_STD_THRESH, TG3_FL_5705,
10465 0x00000000, 0x000003ff },
10466 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
10467 0x00000000, 0xffffffff },
10469 /* Host Coalescing Control Registers. */
10470 { HOSTCC_MODE, TG3_FL_NOT_5705,
10471 0x00000000, 0x00000004 },
10472 { HOSTCC_MODE, TG3_FL_5705,
10473 0x00000000, 0x000000f6 },
10474 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
10475 0x00000000, 0xffffffff },
10476 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
10477 0x00000000, 0x000003ff },
10478 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
10479 0x00000000, 0xffffffff },
10480 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
10481 0x00000000, 0x000003ff },
10482 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
10483 0x00000000, 0xffffffff },
10484 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
10485 0x00000000, 0x000000ff },
10486 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
10487 0x00000000, 0xffffffff },
10488 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
10489 0x00000000, 0x000000ff },
10490 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
10491 0x00000000, 0xffffffff },
10492 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
10493 0x00000000, 0xffffffff },
10494 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
10495 0x00000000, 0xffffffff },
10496 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
10497 0x00000000, 0x000000ff },
10498 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
10499 0x00000000, 0xffffffff },
10500 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
10501 0x00000000, 0x000000ff },
10502 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
10503 0x00000000, 0xffffffff },
10504 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
10505 0x00000000, 0xffffffff },
10506 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
10507 0x00000000, 0xffffffff },
10508 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
10509 0x00000000, 0xffffffff },
10510 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
10511 0x00000000, 0xffffffff },
10512 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
10513 0xffffffff, 0x00000000 },
10514 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
10515 0xffffffff, 0x00000000 },
10517 /* Buffer Manager Control Registers. */
10518 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
10519 0x00000000, 0x007fff80 },
10520 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
10521 0x00000000, 0x007fffff },
10522 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
10523 0x00000000, 0x0000003f },
10524 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
10525 0x00000000, 0x000001ff },
10526 { BUFMGR_MB_HIGH_WATER, 0x0000,
10527 0x00000000, 0x000001ff },
10528 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
10529 0xffffffff, 0x00000000 },
10530 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
10531 0xffffffff, 0x00000000 },
10533 /* Mailbox Registers */
10534 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
10535 0x00000000, 0x000001ff },
10536 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
10537 0x00000000, 0x000001ff },
10538 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
10539 0x00000000, 0x000007ff },
10540 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
10541 0x00000000, 0x000001ff },
10543 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
10546 is_5705 = is_5750 = 0;
10547 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
10549 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
10553 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
10554 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
10557 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
10560 if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
10561 (reg_tbl[i].flags & TG3_FL_NOT_5788))
10564 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
10567 offset = (u32) reg_tbl[i].offset;
10568 read_mask = reg_tbl[i].read_mask;
10569 write_mask = reg_tbl[i].write_mask;
10571 /* Save the original register content */
10572 save_val = tr32(offset);
10574 /* Determine the read-only value. */
10575 read_val = save_val & read_mask;
10577 /* Write zero to the register, then make sure the read-only bits
10578 * are not changed and the read/write bits are all zeros.
10582 val = tr32(offset);
10584 /* Test the read-only and read/write bits. */
10585 if (((val & read_mask) != read_val) || (val & write_mask))
10588 /* Write ones to all the bits defined by RdMask and WrMask, then
10589 * make sure the read-only bits are not changed and the
10590 * read/write bits are all ones.
10592 tw32(offset, read_mask | write_mask);
10594 val = tr32(offset);
10596 /* Test the read-only bits. */
10597 if ((val & read_mask) != read_val)
10600 /* Test the read/write bits. */
10601 if ((val & write_mask) != write_mask)
10604 tw32(offset, save_val);
10610 if (netif_msg_hw(tp))
10611 printk(KERN_ERR PFX "Register test failed at offset %x\n",
10613 tw32(offset, save_val);
10617 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
10619 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
10623 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
10624 for (j = 0; j < len; j += 4) {
10627 tg3_write_mem(tp, offset + j, test_pattern[i]);
10628 tg3_read_mem(tp, offset + j, &val);
10629 if (val != test_pattern[i])
10636 static int tg3_test_memory(struct tg3 *tp)
10638 static struct mem_entry {
10641 } mem_tbl_570x[] = {
10642 { 0x00000000, 0x00b50},
10643 { 0x00002000, 0x1c000},
10644 { 0xffffffff, 0x00000}
10645 }, mem_tbl_5705[] = {
10646 { 0x00000100, 0x0000c},
10647 { 0x00000200, 0x00008},
10648 { 0x00004000, 0x00800},
10649 { 0x00006000, 0x01000},
10650 { 0x00008000, 0x02000},
10651 { 0x00010000, 0x0e000},
10652 { 0xffffffff, 0x00000}
10653 }, mem_tbl_5755[] = {
10654 { 0x00000200, 0x00008},
10655 { 0x00004000, 0x00800},
10656 { 0x00006000, 0x00800},
10657 { 0x00008000, 0x02000},
10658 { 0x00010000, 0x0c000},
10659 { 0xffffffff, 0x00000}
10660 }, mem_tbl_5906[] = {
10661 { 0x00000200, 0x00008},
10662 { 0x00004000, 0x00400},
10663 { 0x00006000, 0x00400},
10664 { 0x00008000, 0x01000},
10665 { 0x00010000, 0x01000},
10666 { 0xffffffff, 0x00000}
10667 }, mem_tbl_5717[] = {
10668 { 0x00000200, 0x00008},
10669 { 0x00010000, 0x0a000},
10670 { 0x00020000, 0x13c00},
10671 { 0xffffffff, 0x00000}
10672 }, mem_tbl_57765[] = {
10673 { 0x00000200, 0x00008},
10674 { 0x00004000, 0x00800},
10675 { 0x00006000, 0x09800},
10676 { 0x00010000, 0x0a000},
10677 { 0xffffffff, 0x00000}
10679 struct mem_entry *mem_tbl;
10683 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
10684 mem_tbl = mem_tbl_5717;
10685 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
10686 mem_tbl = mem_tbl_57765;
10687 else if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
10688 mem_tbl = mem_tbl_5755;
10689 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10690 mem_tbl = mem_tbl_5906;
10691 else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
10692 mem_tbl = mem_tbl_5705;
10694 mem_tbl = mem_tbl_570x;
10696 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
10697 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
10698 mem_tbl[i].len)) != 0)
10705 #define TG3_MAC_LOOPBACK 0
10706 #define TG3_PHY_LOOPBACK 1
10708 static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
10710 u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
10711 u32 desc_idx, coal_now;
10712 struct sk_buff *skb, *rx_skb;
10715 int num_pkts, tx_len, rx_len, i, err;
10716 struct tg3_rx_buffer_desc *desc;
10717 struct tg3_napi *tnapi, *rnapi;
10718 struct tg3_rx_prodring_set *tpr = &tp->prodring[0];
10720 if (tp->irq_cnt > 1) {
10721 tnapi = &tp->napi[1];
10722 rnapi = &tp->napi[1];
10724 tnapi = &tp->napi[0];
10725 rnapi = &tp->napi[0];
10727 coal_now = tnapi->coal_now | rnapi->coal_now;
10729 if (loopback_mode == TG3_MAC_LOOPBACK) {
10730 /* HW errata - mac loopback fails in some cases on 5780.
10731 * Normal traffic and PHY loopback are not affected by
10734 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
10737 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
10738 MAC_MODE_PORT_INT_LPBACK;
10739 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
10740 mac_mode |= MAC_MODE_LINK_POLARITY;
10741 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
10742 mac_mode |= MAC_MODE_PORT_MODE_MII;
10744 mac_mode |= MAC_MODE_PORT_MODE_GMII;
10745 tw32(MAC_MODE, mac_mode);
10746 } else if (loopback_mode == TG3_PHY_LOOPBACK) {
10749 if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
10750 tg3_phy_fet_toggle_apd(tp, false);
10751 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
10753 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
10755 tg3_phy_toggle_automdix(tp, 0);
10757 tg3_writephy(tp, MII_BMCR, val);
10760 mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
10761 if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
10762 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10763 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x1800);
10764 mac_mode |= MAC_MODE_PORT_MODE_MII;
10766 mac_mode |= MAC_MODE_PORT_MODE_GMII;
10768 /* reset to prevent losing 1st rx packet intermittently */
10769 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
10770 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10772 tw32_f(MAC_RX_MODE, tp->rx_mode);
10774 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
10775 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)
10776 mac_mode &= ~MAC_MODE_LINK_POLARITY;
10777 else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411)
10778 mac_mode |= MAC_MODE_LINK_POLARITY;
10779 tg3_writephy(tp, MII_TG3_EXT_CTRL,
10780 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
10782 tw32(MAC_MODE, mac_mode);
10790 skb = netdev_alloc_skb(tp->dev, tx_len);
10794 tx_data = skb_put(skb, tx_len);
10795 memcpy(tx_data, tp->dev->dev_addr, 6);
10796 memset(tx_data + 6, 0x0, 8);
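/* Loopback test frame: the destination MAC is the device's own
 * address, bytes 6-13 (source address and the two type/length bytes)
 * are zeroed, and the payload from byte 14 onward is an incrementing
 * i & 0xff pattern.  The receive path below verifies the same pattern
 * byte for byte to prove the looped-back frame arrived intact.
 */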
10798 tw32(MAC_RX_MTU_SIZE, tx_len + 4);
10800 for (i = 14; i < tx_len; i++)
10801 tx_data[i] = (u8) (i & 0xff);
10803 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
10804 if (pci_dma_mapping_error(tp->pdev, map)) {
10805 dev_kfree_skb(skb);
10809 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10814 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
10818 tg3_set_txd(tnapi, tnapi->tx_prod, map, tx_len, 0, 1);
10823 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
10824 tr32_mailbox(tnapi->prodmbox);
10828 /* 350 usec to allow enough time on some 10/100 Mbps devices. */
10829 for (i = 0; i < 35; i++) {
10830 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10835 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
10836 rx_idx = rnapi->hw_status->idx[0].rx_producer;
10837 if ((tx_idx == tnapi->tx_prod) &&
10838 (rx_idx == (rx_start_idx + num_pkts)))
10842 pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
10843 dev_kfree_skb(skb);
10845 if (tx_idx != tnapi->tx_prod)
10848 if (rx_idx != rx_start_idx + num_pkts)
10851 desc = &rnapi->rx_rcb[rx_start_idx];
10852 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
10853 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
10854 if (opaque_key != RXD_OPAQUE_RING_STD)
10857 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
10858 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
10861 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
10862 if (rx_len != tx_len)
10865 rx_skb = tpr->rx_std_buffers[desc_idx].skb;
10867 map = pci_unmap_addr(&tpr->rx_std_buffers[desc_idx], mapping);
10868 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
10870 for (i = 14; i < tx_len; i++) {
10871 if (*(rx_skb->data + i) != (u8) (i & 0xff))
10876 /* tg3_free_rings will unmap and free the rx_skb */
10881 #define TG3_MAC_LOOPBACK_FAILED 1
10882 #define TG3_PHY_LOOPBACK_FAILED 2
10883 #define TG3_LOOPBACK_FAILED (TG3_MAC_LOOPBACK_FAILED | \
10884 TG3_PHY_LOOPBACK_FAILED)
10886 static int tg3_test_loopback(struct tg3 *tp)
10891 if (!netif_running(tp->dev))
10892 return TG3_LOOPBACK_FAILED;
10894 err = tg3_reset_hw(tp, 1);
10896 return TG3_LOOPBACK_FAILED;
10898 /* Turn off gphy autopowerdown. */
10899 if (tp->tg3_flags3 & TG3_FLG3_PHY_ENABLE_APD)
10900 tg3_phy_toggle_apd(tp, false);
10902 if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) {
10906 tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);
10908 /* Wait for up to 40 microseconds to acquire lock. */
10909 for (i = 0; i < 4; i++) {
10910 status = tr32(TG3_CPMU_MUTEX_GNT);
10911 if (status == CPMU_MUTEX_GNT_DRIVER)
10916 if (status != CPMU_MUTEX_GNT_DRIVER)
10917 return TG3_LOOPBACK_FAILED;
10919 /* Turn off link-based power management. */
10920 cpmuctrl = tr32(TG3_CPMU_CTRL);
10921 tw32(TG3_CPMU_CTRL,
10922 cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
10923 CPMU_CTRL_LINK_AWARE_MODE));
10926 if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
10927 err |= TG3_MAC_LOOPBACK_FAILED;
10929 if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) {
10930 tw32(TG3_CPMU_CTRL, cpmuctrl);
10932 /* Release the mutex */
10933 tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
10936 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
10937 !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
10938 if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
10939 err |= TG3_PHY_LOOPBACK_FAILED;
10942 /* Re-enable gphy autopowerdown. */
10943 if (tp->tg3_flags3 & TG3_FLG3_PHY_ENABLE_APD)
10944 tg3_phy_toggle_apd(tp, true);
10949 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
10952 struct tg3 *tp = netdev_priv(dev);
10954 if (tp->link_config.phy_is_low_power)
10955 tg3_set_power_state(tp, PCI_D0);
10957 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
10959 if (tg3_test_nvram(tp) != 0) {
10960 etest->flags |= ETH_TEST_FL_FAILED;
10963 if (tg3_test_link(tp) != 0) {
10964 etest->flags |= ETH_TEST_FL_FAILED;
10967 if (etest->flags & ETH_TEST_FL_OFFLINE) {
10968 int err, err2 = 0, irq_sync = 0;
10970 if (netif_running(dev)) {
10972 tg3_netif_stop(tp);
10976 tg3_full_lock(tp, irq_sync);
10978 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
10979 err = tg3_nvram_lock(tp);
10980 tg3_halt_cpu(tp, RX_CPU_BASE);
10981 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
10982 tg3_halt_cpu(tp, TX_CPU_BASE);
10984 tg3_nvram_unlock(tp);
10986 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
10989 if (tg3_test_registers(tp) != 0) {
10990 etest->flags |= ETH_TEST_FL_FAILED;
10993 if (tg3_test_memory(tp) != 0) {
10994 etest->flags |= ETH_TEST_FL_FAILED;
10997 if ((data[4] = tg3_test_loopback(tp)) != 0)
10998 etest->flags |= ETH_TEST_FL_FAILED;
11000 tg3_full_unlock(tp);
11002 if (tg3_test_interrupt(tp) != 0) {
11003 etest->flags |= ETH_TEST_FL_FAILED;
11007 tg3_full_lock(tp, 0);
11009 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11010 if (netif_running(dev)) {
11011 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
11012 err2 = tg3_restart_hw(tp, 1);
11014 tg3_netif_start(tp);
11017 tg3_full_unlock(tp);
11019 if (irq_sync && !err2)
11022 if (tp->link_config.phy_is_low_power)
11023 tg3_set_power_state(tp, PCI_D3hot);
11027 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11029 struct mii_ioctl_data *data = if_mii(ifr);
11030 struct tg3 *tp = netdev_priv(dev);
11033 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
11034 struct phy_device *phydev;
11035 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
11037 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11038 return phy_mii_ioctl(phydev, data, cmd);
11043 data->phy_id = tp->phy_addr;
11046 case SIOCGMIIREG: {
11049 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
11050 break; /* We have no PHY */
11052 if (tp->link_config.phy_is_low_power)
11055 spin_lock_bh(&tp->lock);
11056 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
11057 spin_unlock_bh(&tp->lock);
11059 data->val_out = mii_regval;
11065 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
11066 break; /* We have no PHY */
11068 if (tp->link_config.phy_is_low_power)
11071 spin_lock_bh(&tp->lock);
11072 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
11073 spin_unlock_bh(&tp->lock);
11081 return -EOPNOTSUPP;
11084 #if TG3_VLAN_TAG_USED
11085 static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
11087 struct tg3 *tp = netdev_priv(dev);
11089 if (!netif_running(dev)) {
11094 tg3_netif_stop(tp);
11096 tg3_full_lock(tp, 0);
11100 /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
11101 __tg3_set_rx_mode(dev);
11103 tg3_netif_start(tp);
11105 tg3_full_unlock(tp);
11109 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11111 struct tg3 *tp = netdev_priv(dev);
11113 memcpy(ec, &tp->coal, sizeof(*ec));
11117 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11119 struct tg3 *tp = netdev_priv(dev);
11120 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
11121 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
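/* These limits stay at zero on 5705-and-newer parts, so the range
 * checks below reject any nonzero IRQ-coalescing value or
 * statistics-block interval there; only the older chips accept those
 * parameters.
 */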
11123 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
11124 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
11125 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
11126 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
11127 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
11130 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
11131 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
11132 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
11133 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
11134 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
11135 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
11136 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
11137 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
11138 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
11139 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
11142 /* No rx interrupts will be generated if both are zero */
11143 if ((ec->rx_coalesce_usecs == 0) &&
11144 (ec->rx_max_coalesced_frames == 0))
11147 /* No tx interrupts will be generated if both are zero */
11148 if ((ec->tx_coalesce_usecs == 0) &&
11149 (ec->tx_max_coalesced_frames == 0))
11152 /* Only copy relevant parameters, ignore all others. */
11153 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
11154 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
11155 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
11156 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
11157 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
11158 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
11159 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
11160 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
11161 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
11163 if (netif_running(dev)) {
11164 tg3_full_lock(tp, 0);
11165 __tg3_set_coalesce(tp, &tp->coal);
11166 tg3_full_unlock(tp);
11171 static const struct ethtool_ops tg3_ethtool_ops = {
11172 .get_settings = tg3_get_settings,
11173 .set_settings = tg3_set_settings,
11174 .get_drvinfo = tg3_get_drvinfo,
11175 .get_regs_len = tg3_get_regs_len,
11176 .get_regs = tg3_get_regs,
11177 .get_wol = tg3_get_wol,
11178 .set_wol = tg3_set_wol,
11179 .get_msglevel = tg3_get_msglevel,
11180 .set_msglevel = tg3_set_msglevel,
11181 .nway_reset = tg3_nway_reset,
11182 .get_link = ethtool_op_get_link,
11183 .get_eeprom_len = tg3_get_eeprom_len,
11184 .get_eeprom = tg3_get_eeprom,
11185 .set_eeprom = tg3_set_eeprom,
11186 .get_ringparam = tg3_get_ringparam,
11187 .set_ringparam = tg3_set_ringparam,
11188 .get_pauseparam = tg3_get_pauseparam,
11189 .set_pauseparam = tg3_set_pauseparam,
11190 .get_rx_csum = tg3_get_rx_csum,
11191 .set_rx_csum = tg3_set_rx_csum,
11192 .set_tx_csum = tg3_set_tx_csum,
11193 .set_sg = ethtool_op_set_sg,
11194 .set_tso = tg3_set_tso,
11195 .self_test = tg3_self_test,
11196 .get_strings = tg3_get_strings,
11197 .phys_id = tg3_phys_id,
11198 .get_ethtool_stats = tg3_get_ethtool_stats,
11199 .get_coalesce = tg3_get_coalesce,
11200 .set_coalesce = tg3_set_coalesce,
11201 .get_sset_count = tg3_get_sset_count,
11204 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
11206 u32 cursize, val, magic;
11208 tp->nvram_size = EEPROM_CHIP_SIZE;
11210 if (tg3_nvram_read(tp, 0, &magic) != 0)
11213 if ((magic != TG3_EEPROM_MAGIC) &&
11214 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
11215 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
11219 * Size the chip by reading offsets at increasing powers of two.
11220 * When we encounter our validation signature, we know the addressing
11221 * has wrapped around, and thus have our chip size.
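* For example, if the part holds N bytes, the probe at offset N wraps
* back to offset 0 and returns the magic value again, so cursize (and
* hence nvram_size) ends up equal to N.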
11225 while (cursize < tp->nvram_size) {
11226 if (tg3_nvram_read(tp, cursize, &val) != 0)
11235 tp->nvram_size = cursize;
11238 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
11242 if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) ||
11243 tg3_nvram_read(tp, 0, &val) != 0)
11246 /* Selfboot format */
11247 if (val != TG3_EEPROM_MAGIC) {
11248 tg3_get_eeprom_size(tp);
11252 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
11254 /* This is confusing. We want to operate on the
11255 * 16-bit value at offset 0xf2. The tg3_nvram_read()
11256 * call will read from NVRAM and byteswap the data
11257 * according to the byteswapping settings for all
11258 * other register accesses. This ensures the data we
11259 * want will always reside in the lower 16-bits.
11260 * However, the data in NVRAM is in LE format, which
11261 * means the data from the NVRAM read will always be
11262 * opposite the endianness of the CPU. The 16-bit
11263 * byteswap then brings the data to CPU endianness.
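/* For example, a halfword of 0x0004 stored in NVRAM is seen as 0x0400 after
 * the read; swab16() below restores 0x0004, giving 4 * 1024 bytes.
 * (Values chosen purely for illustration.)
 */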
11265 tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
11269 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11272 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
11276 nvcfg1 = tr32(NVRAM_CFG1);
11277 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
11278 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11280 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11281 tw32(NVRAM_CFG1, nvcfg1);
11284 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
11285 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
11286 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
11287 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
11288 tp->nvram_jedecnum = JEDEC_ATMEL;
11289 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
11290 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11292 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
11293 tp->nvram_jedecnum = JEDEC_ATMEL;
11294 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
11296 case FLASH_VENDOR_ATMEL_EEPROM:
11297 tp->nvram_jedecnum = JEDEC_ATMEL;
11298 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11299 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11301 case FLASH_VENDOR_ST:
11302 tp->nvram_jedecnum = JEDEC_ST;
11303 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
11304 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11306 case FLASH_VENDOR_SAIFUN:
11307 tp->nvram_jedecnum = JEDEC_SAIFUN;
11308 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
11310 case FLASH_VENDOR_SST_SMALL:
11311 case FLASH_VENDOR_SST_LARGE:
11312 tp->nvram_jedecnum = JEDEC_SST;
11313 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
11317 tp->nvram_jedecnum = JEDEC_ATMEL;
11318 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
11319 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11323 static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
11325 switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
11326 case FLASH_5752PAGE_SIZE_256:
11327 tp->nvram_pagesize = 256;
11329 case FLASH_5752PAGE_SIZE_512:
11330 tp->nvram_pagesize = 512;
11332 case FLASH_5752PAGE_SIZE_1K:
11333 tp->nvram_pagesize = 1024;
11335 case FLASH_5752PAGE_SIZE_2K:
11336 tp->nvram_pagesize = 2048;
11338 case FLASH_5752PAGE_SIZE_4K:
11339 tp->nvram_pagesize = 4096;
11341 case FLASH_5752PAGE_SIZE_264:
11342 tp->nvram_pagesize = 264;
11344 case FLASH_5752PAGE_SIZE_528:
11345 tp->nvram_pagesize = 528;
11350 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
11354 nvcfg1 = tr32(NVRAM_CFG1);
11356 /* NVRAM protection for TPM */
11357 if (nvcfg1 & (1 << 27))
11358 tp->tg3_flags3 |= TG3_FLG3_PROTECTED_NVRAM;
11360 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11361 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
11362 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
11363 tp->nvram_jedecnum = JEDEC_ATMEL;
11364 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11366 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11367 tp->nvram_jedecnum = JEDEC_ATMEL;
11368 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11369 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11371 case FLASH_5752VENDOR_ST_M45PE10:
11372 case FLASH_5752VENDOR_ST_M45PE20:
11373 case FLASH_5752VENDOR_ST_M45PE40:
11374 tp->nvram_jedecnum = JEDEC_ST;
11375 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11376 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11380 if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
11381 tg3_nvram_get_pagesize(tp, nvcfg1);
11383 /* For eeprom, set pagesize to maximum eeprom size */
11384 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11386 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11387 tw32(NVRAM_CFG1, nvcfg1);
11391 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
11393 u32 nvcfg1, protect = 0;
11395 nvcfg1 = tr32(NVRAM_CFG1);
11397 /* NVRAM protection for TPM */
11398 if (nvcfg1 & (1 << 27)) {
11399 tp->tg3_flags3 |= TG3_FLG3_PROTECTED_NVRAM;
11403 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
11405 case FLASH_5755VENDOR_ATMEL_FLASH_1:
11406 case FLASH_5755VENDOR_ATMEL_FLASH_2:
11407 case FLASH_5755VENDOR_ATMEL_FLASH_3:
11408 case FLASH_5755VENDOR_ATMEL_FLASH_5:
11409 tp->nvram_jedecnum = JEDEC_ATMEL;
11410 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11411 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11412 tp->nvram_pagesize = 264;
11413 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
11414 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
11415 tp->nvram_size = (protect ? 0x3e200 :
11416 TG3_NVRAM_SIZE_512KB);
11417 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
11418 tp->nvram_size = (protect ? 0x1f200 :
11419 TG3_NVRAM_SIZE_256KB);
11421 tp->nvram_size = (protect ? 0x1f200 :
11422 TG3_NVRAM_SIZE_128KB);
11424 case FLASH_5752VENDOR_ST_M45PE10:
11425 case FLASH_5752VENDOR_ST_M45PE20:
11426 case FLASH_5752VENDOR_ST_M45PE40:
11427 tp->nvram_jedecnum = JEDEC_ST;
11428 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11429 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11430 tp->nvram_pagesize = 256;
11431 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
11432 tp->nvram_size = (protect ?
11433 TG3_NVRAM_SIZE_64KB :
11434 TG3_NVRAM_SIZE_128KB);
11435 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
11436 tp->nvram_size = (protect ?
11437 TG3_NVRAM_SIZE_64KB :
11438 TG3_NVRAM_SIZE_256KB);
11440 tp->nvram_size = (protect ?
11441 TG3_NVRAM_SIZE_128KB :
11442 TG3_NVRAM_SIZE_512KB);
11447 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
11451 nvcfg1 = tr32(NVRAM_CFG1);
11453 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11454 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
11455 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
11456 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
11457 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
11458 tp->nvram_jedecnum = JEDEC_ATMEL;
11459 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11460 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11462 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11463 tw32(NVRAM_CFG1, nvcfg1);
11465 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11466 case FLASH_5755VENDOR_ATMEL_FLASH_1:
11467 case FLASH_5755VENDOR_ATMEL_FLASH_2:
11468 case FLASH_5755VENDOR_ATMEL_FLASH_3:
11469 tp->nvram_jedecnum = JEDEC_ATMEL;
11470 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11471 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11472 tp->nvram_pagesize = 264;
11474 case FLASH_5752VENDOR_ST_M45PE10:
11475 case FLASH_5752VENDOR_ST_M45PE20:
11476 case FLASH_5752VENDOR_ST_M45PE40:
11477 tp->nvram_jedecnum = JEDEC_ST;
11478 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11479 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11480 tp->nvram_pagesize = 256;
11485 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
11487 u32 nvcfg1, protect = 0;
11489 nvcfg1 = tr32(NVRAM_CFG1);
11491 /* NVRAM protection for TPM */
11492 if (nvcfg1 & (1 << 27)) {
11493 tp->tg3_flags3 |= TG3_FLG3_PROTECTED_NVRAM;
11497 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
11499 case FLASH_5761VENDOR_ATMEL_ADB021D:
11500 case FLASH_5761VENDOR_ATMEL_ADB041D:
11501 case FLASH_5761VENDOR_ATMEL_ADB081D:
11502 case FLASH_5761VENDOR_ATMEL_ADB161D:
11503 case FLASH_5761VENDOR_ATMEL_MDB021D:
11504 case FLASH_5761VENDOR_ATMEL_MDB041D:
11505 case FLASH_5761VENDOR_ATMEL_MDB081D:
11506 case FLASH_5761VENDOR_ATMEL_MDB161D:
11507 tp->nvram_jedecnum = JEDEC_ATMEL;
11508 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11509 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11510 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
11511 tp->nvram_pagesize = 256;
11513 case FLASH_5761VENDOR_ST_A_M45PE20:
11514 case FLASH_5761VENDOR_ST_A_M45PE40:
11515 case FLASH_5761VENDOR_ST_A_M45PE80:
11516 case FLASH_5761VENDOR_ST_A_M45PE16:
11517 case FLASH_5761VENDOR_ST_M_M45PE20:
11518 case FLASH_5761VENDOR_ST_M_M45PE40:
11519 case FLASH_5761VENDOR_ST_M_M45PE80:
11520 case FLASH_5761VENDOR_ST_M_M45PE16:
11521 tp->nvram_jedecnum = JEDEC_ST;
11522 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11523 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11524 tp->nvram_pagesize = 256;
11529 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
11532 case FLASH_5761VENDOR_ATMEL_ADB161D:
11533 case FLASH_5761VENDOR_ATMEL_MDB161D:
11534 case FLASH_5761VENDOR_ST_A_M45PE16:
11535 case FLASH_5761VENDOR_ST_M_M45PE16:
11536 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
11538 case FLASH_5761VENDOR_ATMEL_ADB081D:
11539 case FLASH_5761VENDOR_ATMEL_MDB081D:
11540 case FLASH_5761VENDOR_ST_A_M45PE80:
11541 case FLASH_5761VENDOR_ST_M_M45PE80:
11542 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
11544 case FLASH_5761VENDOR_ATMEL_ADB041D:
11545 case FLASH_5761VENDOR_ATMEL_MDB041D:
11546 case FLASH_5761VENDOR_ST_A_M45PE40:
11547 case FLASH_5761VENDOR_ST_M_M45PE40:
11548 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11550 case FLASH_5761VENDOR_ATMEL_ADB021D:
11551 case FLASH_5761VENDOR_ATMEL_MDB021D:
11552 case FLASH_5761VENDOR_ST_A_M45PE20:
11553 case FLASH_5761VENDOR_ST_M_M45PE20:
11554 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
11560 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
11562 tp->nvram_jedecnum = JEDEC_ATMEL;
11563 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11564 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11567 static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
11571 nvcfg1 = tr32(NVRAM_CFG1);
11573 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11574 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
11575 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
11576 tp->nvram_jedecnum = JEDEC_ATMEL;
11577 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11578 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11580 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11581 tw32(NVRAM_CFG1, nvcfg1);
11583 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11584 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
11585 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
11586 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
11587 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
11588 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
11589 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
11590 tp->nvram_jedecnum = JEDEC_ATMEL;
11591 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11592 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11594 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11595 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11596 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
11597 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
11598 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
11600 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
11601 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
11602 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
11604 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
11605 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
11606 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11610 case FLASH_5752VENDOR_ST_M45PE10:
11611 case FLASH_5752VENDOR_ST_M45PE20:
11612 case FLASH_5752VENDOR_ST_M45PE40:
11613 tp->nvram_jedecnum = JEDEC_ST;
11614 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11615 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11617 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11618 case FLASH_5752VENDOR_ST_M45PE10:
11619 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
11621 case FLASH_5752VENDOR_ST_M45PE20:
11622 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
11624 case FLASH_5752VENDOR_ST_M45PE40:
11625 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11630 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM;
11634 tg3_nvram_get_pagesize(tp, nvcfg1);
11635 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
11636 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
11640 static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
11644 nvcfg1 = tr32(NVRAM_CFG1);
11646 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11647 case FLASH_5717VENDOR_ATMEL_EEPROM:
11648 case FLASH_5717VENDOR_MICRO_EEPROM:
11649 tp->nvram_jedecnum = JEDEC_ATMEL;
11650 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11651 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11653 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11654 tw32(NVRAM_CFG1, nvcfg1);
11656 case FLASH_5717VENDOR_ATMEL_MDB011D:
11657 case FLASH_5717VENDOR_ATMEL_ADB011B:
11658 case FLASH_5717VENDOR_ATMEL_ADB011D:
11659 case FLASH_5717VENDOR_ATMEL_MDB021D:
11660 case FLASH_5717VENDOR_ATMEL_ADB021B:
11661 case FLASH_5717VENDOR_ATMEL_ADB021D:
11662 case FLASH_5717VENDOR_ATMEL_45USPT:
11663 tp->nvram_jedecnum = JEDEC_ATMEL;
11664 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11665 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11667 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11668 case FLASH_5717VENDOR_ATMEL_MDB021D:
11669 case FLASH_5717VENDOR_ATMEL_ADB021B:
11670 case FLASH_5717VENDOR_ATMEL_ADB021D:
11671 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
11674 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
11678 case FLASH_5717VENDOR_ST_M_M25PE10:
11679 case FLASH_5717VENDOR_ST_A_M25PE10:
11680 case FLASH_5717VENDOR_ST_M_M45PE10:
11681 case FLASH_5717VENDOR_ST_A_M45PE10:
11682 case FLASH_5717VENDOR_ST_M_M25PE20:
11683 case FLASH_5717VENDOR_ST_A_M25PE20:
11684 case FLASH_5717VENDOR_ST_M_M45PE20:
11685 case FLASH_5717VENDOR_ST_A_M45PE20:
11686 case FLASH_5717VENDOR_ST_25USPT:
11687 case FLASH_5717VENDOR_ST_45USPT:
11688 tp->nvram_jedecnum = JEDEC_ST;
11689 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11690 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11692 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11693 case FLASH_5717VENDOR_ST_M_M25PE20:
11694 case FLASH_5717VENDOR_ST_A_M25PE20:
11695 case FLASH_5717VENDOR_ST_M_M45PE20:
11696 case FLASH_5717VENDOR_ST_A_M45PE20:
11697 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
11700 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
11705 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM;
11709 tg3_nvram_get_pagesize(tp, nvcfg1);
11710 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
11711 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
11714 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
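/* Overall flow: reset the serial EEPROM state machine and program its clock,
 * enable SEEPROM access, and on NVRAM-capable chips (everything but
 * 5700/5701) take the NVRAM lock, run the per-ASIC probe to fill in JEDEC id,
 * page size and size, then release access and the lock.  5700/5701 fall back
 * to plain EEPROM sizing.
 */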
11715 static void __devinit tg3_nvram_init(struct tg3 *tp)
11717 tw32_f(GRC_EEPROM_ADDR,
11718 (EEPROM_ADDR_FSM_RESET |
11719 (EEPROM_DEFAULT_CLOCK_PERIOD <<
11720 EEPROM_ADDR_CLKPERD_SHIFT)));
11724 /* Enable seeprom accesses. */
11725 tw32_f(GRC_LOCAL_CTRL,
11726 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
11729 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
11730 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
11731 tp->tg3_flags |= TG3_FLAG_NVRAM;
11733 if (tg3_nvram_lock(tp)) {
11734 printk(KERN_WARNING PFX "%s: Cannot get nvram lock, "
11735 "tg3_nvram_init failed.\n", tp->dev->name);
11738 tg3_enable_nvram_access(tp);
11740 tp->nvram_size = 0;
11742 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
11743 tg3_get_5752_nvram_info(tp);
11744 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
11745 tg3_get_5755_nvram_info(tp);
11746 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
11747 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11748 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
11749 tg3_get_5787_nvram_info(tp);
11750 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
11751 tg3_get_5761_nvram_info(tp);
11752 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11753 tg3_get_5906_nvram_info(tp);
11754 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
11755 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
11756 tg3_get_57780_nvram_info(tp);
11757 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
11758 tg3_get_5717_nvram_info(tp);
11760 tg3_get_nvram_info(tp);
11762 if (tp->nvram_size == 0)
11763 tg3_get_nvram_size(tp);
11765 tg3_disable_nvram_access(tp);
11766 tg3_nvram_unlock(tp);
11769 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
11771 tg3_get_eeprom_size(tp);
11775 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
11776 u32 offset, u32 len, u8 *buf)
11781 for (i = 0; i < len; i += 4) {
11787 memcpy(&data, buf + i, 4);
11790 * The SEEPROM interface expects the data to always be opposite
11791 * the native endian format. We accomplish this by reversing
11792 * all the operations that would have been performed on the
11793 * data from a call to tg3_nvram_read_be32().
11795 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
11797 val = tr32(GRC_EEPROM_ADDR);
11798 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
11800 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
11802 tw32(GRC_EEPROM_ADDR, val |
11803 (0 << EEPROM_ADDR_DEVID_SHIFT) |
11804 (addr & EEPROM_ADDR_ADDR_MASK) |
11805 EEPROM_ADDR_START |
11806 EEPROM_ADDR_WRITE);
11808 for (j = 0; j < 1000; j++) {
11809 val = tr32(GRC_EEPROM_ADDR);
11811 if (val & EEPROM_ADDR_COMPLETE)
11815 if (!(val & EEPROM_ADDR_COMPLETE)) {
11824 /* offset and length are dword aligned */
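/* Unbuffered flash cannot be rewritten a word at a time within a page: the
 * affected page is first read into a scratch buffer, the target bytes are
 * patched in, the page is erased, and the whole page is then streamed back
 * out with FIRST/LAST framing on its first and last words.
 */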
11825 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
11829 u32 pagesize = tp->nvram_pagesize;
11830 u32 pagemask = pagesize - 1;
11834 tmp = kmalloc(pagesize, GFP_KERNEL);
11840 u32 phy_addr, page_off, size;
11842 phy_addr = offset & ~pagemask;
11844 for (j = 0; j < pagesize; j += 4) {
11845 ret = tg3_nvram_read_be32(tp, phy_addr + j,
11846 (__be32 *) (tmp + j));
11853 page_off = offset & pagemask;
11860 memcpy(tmp + page_off, buf, size);
11862 offset = offset + (pagesize - page_off);
11864 tg3_enable_nvram_access(tp);
11867 * Before we can erase the flash page, we need
11868 * to issue a special "write enable" command.
11870 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
11872 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
11875 /* Erase the target page */
11876 tw32(NVRAM_ADDR, phy_addr);
11878 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
11879 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
11881 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
11884 /* Issue another write enable to start the write. */
11885 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
11887 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
11890 for (j = 0; j < pagesize; j += 4) {
11893 data = *((__be32 *) (tmp + j));
11895 tw32(NVRAM_WRDATA, be32_to_cpu(data));
11897 tw32(NVRAM_ADDR, phy_addr + j);
11899 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
11903 nvram_cmd |= NVRAM_CMD_FIRST;
11904 else if (j == (pagesize - 4))
11905 nvram_cmd |= NVRAM_CMD_LAST;
11907 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
11914 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
11915 tg3_nvram_exec_cmd(tp, nvram_cmd);
11922 /* offset and length are dword aligned */
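/* Buffered flash and EEPROM parts accept individual word writes; the main
 * subtlety below is tagging page (and transfer) boundaries with
 * NVRAM_CMD_FIRST and NVRAM_CMD_LAST so the controller frames each page
 * program operation correctly.
 */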
11923 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
11928 for (i = 0; i < len; i += 4, offset += 4) {
11929 u32 page_off, phy_addr, nvram_cmd;
11932 memcpy(&data, buf + i, 4);
11933 tw32(NVRAM_WRDATA, be32_to_cpu(data));
11935 page_off = offset % tp->nvram_pagesize;
11937 phy_addr = tg3_nvram_phys_addr(tp, offset);
11939 tw32(NVRAM_ADDR, phy_addr);
11941 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
11943 if ((page_off == 0) || (i == 0))
11944 nvram_cmd |= NVRAM_CMD_FIRST;
11945 if (page_off == (tp->nvram_pagesize - 4))
11946 nvram_cmd |= NVRAM_CMD_LAST;
11948 if (i == (len - 4))
11949 nvram_cmd |= NVRAM_CMD_LAST;
11951 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
11952 !(tp->tg3_flags3 & TG3_FLG3_5755_PLUS) &&
11953 (tp->nvram_jedecnum == JEDEC_ST) &&
11954 (nvram_cmd & NVRAM_CMD_FIRST)) {
11956 if ((ret = tg3_nvram_exec_cmd(tp,
11957 NVRAM_CMD_WREN | NVRAM_CMD_GO |
11962 if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
11963 /* We always do complete word writes to eeprom. */
11964 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
11967 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
11973 /* offset and length are dword aligned */
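/* Top-level NVRAM write entry point: temporarily lift the GPIO-driven write
 * protect if it is in effect, dispatch legacy parts to the EEPROM writer and
 * everything else to the buffered or unbuffered flash writers above, then
 * restore write protect.
 */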
11974 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
11978 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
11979 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
11980 ~GRC_LCLCTRL_GPIO_OUTPUT1);
11984 if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
11985 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
11990 ret = tg3_nvram_lock(tp);
11994 tg3_enable_nvram_access(tp);
11995 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
11996 !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM))
11997 tw32(NVRAM_WRITE1, 0x406);
11999 grc_mode = tr32(GRC_MODE);
12000 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
12002 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
12003 !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
12005 ret = tg3_nvram_write_block_buffered(tp, offset, len,
12009 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
12013 grc_mode = tr32(GRC_MODE);
12014 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
12016 tg3_disable_nvram_access(tp);
12017 tg3_nvram_unlock(tp);
12020 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
12021 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
12028 struct subsys_tbl_ent {
12029 u16 subsys_vendor, subsys_devid;
12033 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
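/* A phy_id of 0 in this table marks fiber boards that use a SerDes
 * interface rather than a copper PHY.
 */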
12034 /* Broadcom boards. */
12035 { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
12036 { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
12037 { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
12038 { PCI_VENDOR_ID_BROADCOM, 0x0003, 0 }, /* BCM95700A9 */
12039 { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
12040 { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
12041 { PCI_VENDOR_ID_BROADCOM, 0x0007, 0 }, /* BCM95701A7 */
12042 { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
12043 { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
12044 { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
12045 { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */
12048 { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
12049 { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
12050 { PCI_VENDOR_ID_3COM, 0x1004, 0 }, /* 3C996SX */
12051 { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
12052 { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */
12055 { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
12056 { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
12057 { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
12058 { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */
12060 /* Compaq boards. */
12061 { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
12062 { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
12063 { PCI_VENDOR_ID_COMPAQ, 0x007d, 0 }, /* CHANGELING */
12064 { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
12065 { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */
12068 { PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
12071 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
12075 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
12076 if ((subsys_id_to_phy_id[i].subsys_vendor ==
12077 tp->pdev->subsystem_vendor) &&
12078 (subsys_id_to_phy_id[i].subsys_devid ==
12079 tp->pdev->subsystem_device))
12080 return &subsys_id_to_phy_id[i];
12085 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
12090 /* On some early chips the SRAM cannot be accessed in D3hot state,
12091 * so we need to make sure we're in D0.
12093 pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
12094 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
12095 pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
12098 /* Make sure register accesses (indirect or otherwise)
12099 * will function correctly.
12101 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12102 tp->misc_host_ctrl);
12104 /* The memory arbiter has to be enabled in order for SRAM accesses
12105 * to succeed. Normally on powerup the tg3 chip firmware will make
12106 * sure it is enabled, but other entities such as system netboot
12107 * code might disable it.
12109 val = tr32(MEMARB_MODE);
12110 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
12112 tp->phy_id = PHY_ID_INVALID;
12113 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12115 /* Assume an onboard device and WOL capable by default. */
12116 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT | TG3_FLAG_WOL_CAP;
12118 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12119 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
12120 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
12121 tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
12123 val = tr32(VCPU_CFGSHDW);
12124 if (val & VCPU_CFGSHDW_ASPM_DBNC)
12125 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
12126 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
12127 (val & VCPU_CFGSHDW_WOL_MAGPKT))
12128 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
12132 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
12133 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
12134 u32 nic_cfg, led_cfg;
12135 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
12136 int eeprom_phy_serdes = 0;
12138 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
12139 tp->nic_sram_data_cfg = nic_cfg;
12141 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
12142 ver >>= NIC_SRAM_DATA_VER_SHIFT;
12143 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
12144 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
12145 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
12146 (ver > 0) && (ver < 0x100))
12147 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
12149 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12150 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
12152 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
12153 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
12154 eeprom_phy_serdes = 1;
12156 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
12157 if (nic_phy_id != 0) {
12158 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
12159 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
12161 eeprom_phy_id = (id1 >> 16) << 10;
12162 eeprom_phy_id |= (id2 & 0xfc00) << 16;
12163 eeprom_phy_id |= (id2 & 0x03ff) << 0;
12167 tp->phy_id = eeprom_phy_id;
12168 if (eeprom_phy_serdes) {
12169 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
12170 tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
12172 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
12175 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
12176 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
12177 SHASTA_EXT_LED_MODE_MASK);
12179 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
12183 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
12184 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12187 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
12188 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
12191 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
12192 tp->led_ctrl = LED_CTRL_MODE_MAC;
12194 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
12195 * read on some older 5700/5701 bootcode.
12197 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
12199 GET_ASIC_REV(tp->pci_chip_rev_id) ==
12201 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12205 case SHASTA_EXT_LED_SHARED:
12206 tp->led_ctrl = LED_CTRL_MODE_SHARED;
12207 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
12208 tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
12209 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
12210 LED_CTRL_MODE_PHY_2);
12213 case SHASTA_EXT_LED_MAC:
12214 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
12217 case SHASTA_EXT_LED_COMBO:
12218 tp->led_ctrl = LED_CTRL_MODE_COMBO;
12219 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
12220 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
12221 LED_CTRL_MODE_PHY_2);
12226 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12227 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
12228 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
12229 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
12231 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
12232 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12234 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
12235 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
12236 if ((tp->pdev->subsystem_vendor ==
12237 PCI_VENDOR_ID_ARIMA) &&
12238 (tp->pdev->subsystem_device == 0x205a ||
12239 tp->pdev->subsystem_device == 0x2063))
12240 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
12242 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
12243 tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
12246 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
12247 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
12248 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
12249 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
12252 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
12253 (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
12254 tp->tg3_flags3 |= TG3_FLG3_ENABLE_APE;
12256 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES &&
12257 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
12258 tp->tg3_flags &= ~TG3_FLAG_WOL_CAP;
12260 if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
12261 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE))
12262 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
12264 if (cfg2 & (1 << 17))
12265 tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
12267 /* serdes signal pre-emphasis in register 0x590 set by */
12268 /* bootcode if bit 18 is set */
12269 if (cfg2 & (1 << 18))
12270 tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
12272 if (((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
12273 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
12274 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
12275 tp->tg3_flags3 |= TG3_FLG3_PHY_ENABLE_APD;
12277 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
12280 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
12281 if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
12282 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
12285 if (cfg4 & NIC_SRAM_RGMII_STD_IBND_DISABLE)
12286 tp->tg3_flags3 |= TG3_FLG3_RGMII_STD_IBND_DISABLE;
12287 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
12288 tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_RX_EN;
12289 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
12290 tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_TX_EN;
12293 device_init_wakeup(&tp->pdev->dev, tp->tg3_flags & TG3_FLAG_WOL_CAP);
12294 device_set_wakeup_enable(&tp->pdev->dev,
12295 tp->tg3_flags & TG3_FLAG_WOL_ENABLE);
12298 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
12303 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
12304 tw32(OTP_CTRL, cmd);
12306 /* Wait for up to 1 ms for command to execute. */
12307 for (i = 0; i < 100; i++) {
12308 val = tr32(OTP_STATUS);
12309 if (val & OTP_STATUS_CMD_DONE)
12314 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
12317 /* Read the gphy configuration from the OTP region of the chip. The gphy
12318 * configuration is a 32-bit value that straddles the alignment boundary.
12319 * We do two 32-bit reads and then shift and merge the results.
12321 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
12323 u32 bhalf_otp, thalf_otp;
12325 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
12327 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
12330 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
12332 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
12335 thalf_otp = tr32(OTP_READ_DATA);
12337 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
12339 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
12342 bhalf_otp = tr32(OTP_READ_DATA);
12344 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
12347 static int __devinit tg3_phy_probe(struct tg3 *tp)
12349 u32 hw_phy_id_1, hw_phy_id_2;
12350 u32 hw_phy_id, hw_phy_id_masked;
12353 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
12354 return tg3_phy_init(tp);
12356 /* Reading the PHY ID register can conflict with ASF
12357 * firmware access to the PHY hardware.
12360 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
12361 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
12362 hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
12364 /* Now read the physical PHY_ID from the chip and verify
12365 * that it is sane. If it doesn't look good, we fall back
12366 * to the PHY_ID found in the eeprom area or, failing that,
12367 * to the hard-coded subsystem-ID table.
12369 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
12370 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
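/* Pack PHYSID1/PHYSID2 into the driver's internal 32-bit PHY id layout:
 * the 16 bits of PHYSID1 land at bits 10..25, the top six bits of PHYSID2
 * at bits 26..31, and the low ten (model/revision) bits of PHYSID2 stay at
 * bits 0..9, matching the PHY_ID_* constants used below.
 */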
12372 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
12373 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
12374 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
12376 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
12379 if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
12380 tp->phy_id = hw_phy_id;
12381 if (hw_phy_id_masked == PHY_ID_BCM8002)
12382 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
12384 tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
12386 if (tp->phy_id != PHY_ID_INVALID) {
12387 /* Do nothing, phy ID already set up in
12388 * tg3_get_eeprom_hw_cfg().
12391 struct subsys_tbl_ent *p;
12393 /* No eeprom signature? Try the hardcoded
12394 * subsys device table.
12396 p = lookup_by_subsys(tp);
12400 tp->phy_id = p->phy_id;
12402 tp->phy_id == PHY_ID_BCM8002)
12403 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
12407 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
12408 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) &&
12409 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
12410 u32 bmsr, adv_reg, tg3_ctrl, mask;
12412 tg3_readphy(tp, MII_BMSR, &bmsr);
12413 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
12414 (bmsr & BMSR_LSTATUS))
12415 goto skip_phy_reset;
12417 err = tg3_phy_reset(tp);
12421 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
12422 ADVERTISE_100HALF | ADVERTISE_100FULL |
12423 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
12425 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
12426 tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
12427 MII_TG3_CTRL_ADV_1000_FULL);
12428 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
12429 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
12430 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
12431 MII_TG3_CTRL_ENABLE_AS_MASTER);
12434 mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
12435 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
12436 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
12437 if (!tg3_copper_is_advertising_all(tp, mask)) {
12438 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
12440 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
12441 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
12443 tg3_writephy(tp, MII_BMCR,
12444 BMCR_ANENABLE | BMCR_ANRESTART);
12446 tg3_phy_set_wirespeed(tp);
12448 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
12449 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
12450 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
12454 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
12455 err = tg3_init_5401phy_dsp(tp);
12460 if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
12461 err = tg3_init_5401phy_dsp(tp);
12464 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
12465 tp->link_config.advertising =
12466 (ADVERTISED_1000baseT_Half |
12467 ADVERTISED_1000baseT_Full |
12468 ADVERTISED_Autoneg |
12470 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
12471 tp->link_config.advertising &=
12472 ~(ADVERTISED_1000baseT_Half |
12473 ADVERTISED_1000baseT_Full);
12478 static void __devinit tg3_read_partno(struct tg3 *tp)
12480 unsigned char vpd_data[TG3_NVM_VPD_LEN]; /* in little-endian format */
12484 if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) ||
12485 tg3_nvram_read(tp, 0x0, &magic))
12486 goto out_not_found;
12488 if (magic == TG3_EEPROM_MAGIC) {
12489 for (i = 0; i < TG3_NVM_VPD_LEN; i += 4) {
12492 /* The data is in little-endian format in NVRAM.
12493 * Use the big-endian read routines to preserve
12494 * the byte order as it exists in NVRAM.
12496 if (tg3_nvram_read_be32(tp, TG3_NVM_VPD_OFF + i, &tmp))
12497 goto out_not_found;
12499 memcpy(&vpd_data[i], &tmp, sizeof(tmp));
12503 unsigned int pos = 0, i = 0;
12505 for (; pos < TG3_NVM_VPD_LEN && i < 3; i++, pos += cnt) {
12506 cnt = pci_read_vpd(tp->pdev, pos,
12507 TG3_NVM_VPD_LEN - pos,
12509 if (cnt == -ETIMEDOUT || cnt == -EINTR)
12512 goto out_not_found;
12514 if (pos != TG3_NVM_VPD_LEN)
12515 goto out_not_found;
12518 /* Now parse and find the part number. */
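/* The buffer holds standard PCI VPD: a tag byte (0x82 identifier string,
 * 0x90 read-only data, 0x91 read-write data) followed by a two-byte
 * little-endian length.  The board part number lives under the "PN"
 * keyword inside the read-only block.
 */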
12519 for (i = 0; i < TG3_NVM_VPD_LEN - 2; ) {
12520 unsigned char val = vpd_data[i];
12521 unsigned int block_end;
12523 if (val == 0x82 || val == 0x91) {
12526 (vpd_data[i + 2] << 8)));
12531 goto out_not_found;
12533 block_end = (i + 3 +
12535 (vpd_data[i + 2] << 8)));
12538 if (block_end > TG3_NVM_VPD_LEN)
12539 goto out_not_found;
12541 while (i < (block_end - 2)) {
12542 if (vpd_data[i + 0] == 'P' &&
12543 vpd_data[i + 1] == 'N') {
12544 int partno_len = vpd_data[i + 2];
12547 if (partno_len > TG3_BPN_SIZE ||
12548 (partno_len + i) > TG3_NVM_VPD_LEN)
12549 goto out_not_found;
12551 memcpy(tp->board_part_number,
12552 &vpd_data[i], partno_len);
12557 i += 3 + vpd_data[i + 2];
12560 /* Part number not found. */
12561 goto out_not_found;
12565 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12566 strcpy(tp->board_part_number, "BCM95906");
12567 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 &&
12568 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
12569 strcpy(tp->board_part_number, "BCM57780");
12570 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 &&
12571 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
12572 strcpy(tp->board_part_number, "BCM57760");
12573 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 &&
12574 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
12575 strcpy(tp->board_part_number, "BCM57790");
12576 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 &&
12577 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
12578 strcpy(tp->board_part_number, "BCM57788");
12579 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
12580 strcpy(tp->board_part_number, "BCM57765");
12582 strcpy(tp->board_part_number, "none");
12585 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
12589 if (tg3_nvram_read(tp, offset, &val) ||
12590 (val & 0xfc000000) != 0x0c000000 ||
12591 tg3_nvram_read(tp, offset + 4, &val) ||
12598 static void __devinit tg3_read_bc_ver(struct tg3 *tp)
12600 u32 val, offset, start, ver_offset;
12602 bool newver = false;
12604 if (tg3_nvram_read(tp, 0xc, &offset) ||
12605 tg3_nvram_read(tp, 0x4, &start))
12608 offset = tg3_nvram_logical_addr(tp, offset);
12610 if (tg3_nvram_read(tp, offset, &val))
12613 if ((val & 0xfc000000) == 0x0c000000) {
12614 if (tg3_nvram_read(tp, offset + 4, &val))
12622 if (tg3_nvram_read(tp, offset + 8, &ver_offset))
12625 offset = offset + ver_offset - start;
12626 for (i = 0; i < 16; i += 4) {
12628 if (tg3_nvram_read_be32(tp, offset + i, &v))
12631 memcpy(tp->fw_ver + i, &v, sizeof(v));
12636 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
12639 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
12640 TG3_NVM_BCVER_MAJSFT;
12641 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
12642 snprintf(&tp->fw_ver[0], 32, "v%d.%02d", major, minor);
12646 static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
12648 u32 val, major, minor;
12650 /* Use native endian representation */
12651 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
12654 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
12655 TG3_NVM_HWSB_CFG1_MAJSFT;
12656 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
12657 TG3_NVM_HWSB_CFG1_MINSFT;
12659 snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
12662 static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
12664 u32 offset, major, minor, build;
12666 tp->fw_ver[0] = 's';
12667 tp->fw_ver[1] = 'b';
12668 tp->fw_ver[2] = '\0';
12670 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
12673 switch (val & TG3_EEPROM_SB_REVISION_MASK) {
12674 case TG3_EEPROM_SB_REVISION_0:
12675 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
12677 case TG3_EEPROM_SB_REVISION_2:
12678 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
12680 case TG3_EEPROM_SB_REVISION_3:
12681 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
12687 if (tg3_nvram_read(tp, offset, &val))
12690 build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
12691 TG3_EEPROM_SB_EDH_BLD_SHFT;
12692 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
12693 TG3_EEPROM_SB_EDH_MAJ_SHFT;
12694 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
12696 if (minor > 99 || build > 26)
12699 snprintf(&tp->fw_ver[2], 30, " v%d.%02d", major, minor);
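/* A non-zero build number is appended as a single letter, 'a' for build 1
 * through 'z' for build 26 (larger values were rejected above).
 */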
12702 tp->fw_ver[8] = 'a' + build - 1;
12703 tp->fw_ver[9] = '\0';
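/* Scan the NVRAM directory for the ASF/management firmware entry; if the
 * image validates, append its four-word version string to tp->fw_ver.
 */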
12707 static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
12709 u32 val, offset, start;
12712 for (offset = TG3_NVM_DIR_START;
12713 offset < TG3_NVM_DIR_END;
12714 offset += TG3_NVM_DIRENT_SIZE) {
12715 if (tg3_nvram_read(tp, offset, &val))
12718 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
12722 if (offset == TG3_NVM_DIR_END)
12725 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
12726 start = 0x08000000;
12727 else if (tg3_nvram_read(tp, offset - 4, &start))
12730 if (tg3_nvram_read(tp, offset + 4, &offset) ||
12731 !tg3_fw_img_is_valid(tp, offset) ||
12732 tg3_nvram_read(tp, offset + 8, &val))
12735 offset += val - start;
12737 vlen = strlen(tp->fw_ver);
12739 tp->fw_ver[vlen++] = ',';
12740 tp->fw_ver[vlen++] = ' ';
12742 for (i = 0; i < 4; i++) {
12744 if (tg3_nvram_read_be32(tp, offset, &v))
12747 offset += sizeof(v);
12749 if (vlen > TG3_VER_SIZE - sizeof(v)) {
12750 memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
12754 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
12759 static void __devinit tg3_read_dash_ver(struct tg3 *tp)
12764 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) ||
12765 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
12768 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
12769 if (apedata != APE_SEG_SIG_MAGIC)
12772 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
12773 if (!(apedata & APE_FW_STATUS_READY))
12776 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
12778 vlen = strlen(tp->fw_ver);
12780 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " DASH v%d.%d.%d.%d",
12781 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
12782 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
12783 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
12784 (apedata & APE_FW_VERSION_BLDMSK));
12787 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
12791 if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) {
12792 tp->fw_ver[0] = 's';
12793 tp->fw_ver[1] = 'b';
12794 tp->fw_ver[2] = '\0';
12799 if (tg3_nvram_read(tp, 0, &val))
12802 if (val == TG3_EEPROM_MAGIC)
12803 tg3_read_bc_ver(tp);
12804 else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
12805 tg3_read_sb_ver(tp, val);
12806 else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
12807 tg3_read_hwsb_ver(tp);
12811 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
12812 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
12815 tg3_read_mgmtfw_ver(tp);
12817 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
12820 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
12822 static int __devinit tg3_get_invariants(struct tg3 *tp)
12824 static struct pci_device_id write_reorder_chipsets[] = {
12825 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
12826 PCI_DEVICE_ID_AMD_FE_GATE_700C) },
12827 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
12828 PCI_DEVICE_ID_AMD_8131_BRIDGE) },
12829 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
12830 PCI_DEVICE_ID_VIA_8385_0) },
12834 u32 pci_state_reg, grc_misc_cfg;
12839 /* Force memory write invalidate off. If we leave it on,
12840 * then on 5700_BX chips we have to enable a workaround.
12841 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
12842 * to match the cacheline size. The Broadcom driver has this
12843 * workaround but turns MWI off all the time, so it never uses
12844 * it. This seems to suggest that the workaround is insufficient.
12846 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
12847 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
12848 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
12850 /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
12851 * has the register indirect write enable bit set before
12852 * we try to access any of the MMIO registers. It is also
12853 * critical that the PCI-X hw workaround situation is decided
12854 * before that as well.
12856 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12859 tp->pci_chip_rev_id = (misc_ctrl_reg >>
12860 MISC_HOST_CTRL_CHIPREV_SHIFT);
12861 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
12862 u32 prod_id_asic_rev;
12864 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
12865 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
12866 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5724)
12867 pci_read_config_dword(tp->pdev,
12868 TG3PCI_GEN2_PRODID_ASICREV,
12869 &prod_id_asic_rev);
12870 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
12871 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
12872 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
12873 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
12874 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
12875 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
12876 pci_read_config_dword(tp->pdev,
12877 TG3PCI_GEN15_PRODID_ASICREV,
12878 &prod_id_asic_rev);
12880 pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
12881 &prod_id_asic_rev);
12883 tp->pci_chip_rev_id = prod_id_asic_rev;
12886 /* Wrong chip ID in 5752 A0. This code can be removed later
12887 * as A0 is not in production.
12889 if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
12890 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
12892 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
12893 * we need to disable memory and use config. cycles
12894 * only to access all registers. The 5702/03 chips
12895 * can mistakenly decode the special cycles from the
12896 * ICH chipsets as memory write cycles, causing corruption
12897 * of register and memory space. Only certain ICH bridges
12898 * will drive special cycles with non-zero data during the
12899 * address phase which can fall within the 5703's address
12900 * range. This is not an ICH bug as the PCI spec allows
12901 * non-zero address during special cycles. However, only
12902 * these ICH bridges are known to drive non-zero addresses
12903 * during special cycles.
12905 * Since special cycles do not cross PCI bridges, we only
12906 * enable this workaround if the 5703 is on the secondary
12907 * bus of these ICH bridges.
12909 if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
12910 (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
12911 static struct tg3_dev_id {
12915 } ich_chipsets[] = {
12916 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
12918 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
12920 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
12922 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
12926 struct tg3_dev_id *pci_id = &ich_chipsets[0];
12927 struct pci_dev *bridge = NULL;
12929 while (pci_id->vendor != 0) {
12930 bridge = pci_get_device(pci_id->vendor, pci_id->device,
12936 if (pci_id->rev != PCI_ANY_ID) {
12937 if (bridge->revision > pci_id->rev)
12940 if (bridge->subordinate &&
12941 (bridge->subordinate->number ==
12942 tp->pdev->bus->number)) {
12944 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
12945 pci_dev_put(bridge);
12951 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
12952 static struct tg3_dev_id {
12955 } bridge_chipsets[] = {
12956 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
12957 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
12960 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
12961 struct pci_dev *bridge = NULL;
12963 while (pci_id->vendor != 0) {
12964 bridge = pci_get_device(pci_id->vendor,
12971 if (bridge->subordinate &&
12972 (bridge->subordinate->number <=
12973 tp->pdev->bus->number) &&
12974 (bridge->subordinate->subordinate >=
12975 tp->pdev->bus->number)) {
12976 tp->tg3_flags3 |= TG3_FLG3_5701_DMA_BUG;
12977 pci_dev_put(bridge);
12983 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
12984 * DMA addresses > 40-bit. This bridge may have other additional
12985 * 57xx devices behind it in some 4-port NIC designs for example.
12986 * Any tg3 device found behind the bridge will also need the 40-bit
12989 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
12990 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
12991 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
12992 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
12993 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
12996 struct pci_dev *bridge = NULL;
12999 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
13000 PCI_DEVICE_ID_SERVERWORKS_EPB,
13002 if (bridge && bridge->subordinate &&
13003 (bridge->subordinate->number <=
13004 tp->pdev->bus->number) &&
13005 (bridge->subordinate->subordinate >=
13006 tp->pdev->bus->number)) {
13007 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
13008 pci_dev_put(bridge);
13014 /* Initialize misc host control in PCI block. */
13015 tp->misc_host_ctrl |= (misc_ctrl_reg &
13016 MISC_HOST_CTRL_CHIPREV);
13017 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13018 tp->misc_host_ctrl);
13020 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
13021 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
13022 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
13023 tp->pdev_peer = tg3_find_peer(tp);
13025 /* Intentionally exclude ASIC_REV_5906 */
13026 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13027 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13028 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13029 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13030 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13031 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13032 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13033 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
13034 tp->tg3_flags3 |= TG3_FLG3_5755_PLUS;
13036 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
13037 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
13038 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
13039 (tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
13040 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
13041 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
13043 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
13044 (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
13045 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
13047 /* 5700 B0 chips do not support checksumming correctly due
13048 * to hardware bugs.
13050 if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
13051 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
13053 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
13054 tp->dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
13055 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
13056 tp->dev->features |= NETIF_F_IPV6_CSUM;
13059 /* Determine TSO capabilities */
13060 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13061 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
13062 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_3;
13063 else if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
13064 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13065 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
13066 else if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
13067 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG;
13068 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
13069 tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
13070 tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG;
13071 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13072 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13073 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
13074 tp->tg3_flags2 |= TG3_FLG2_TSO_BUG;
13075 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
13076 tp->fw_needed = FIRMWARE_TG3TSO5;
13078 tp->fw_needed = FIRMWARE_TG3TSO;
13083 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
13084 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI;
13085 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
13086 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
13087 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
13088 tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
13089 tp->pdev_peer == tp->pdev))
13090 tp->tg3_flags &= ~TG3_FLAG_SUPPORT_MSI;
13092 if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
13093 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13094 tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
13097 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13098 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
13099 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSIX;
13100 tp->irq_max = TG3_IRQ_MAX_VECS;
13104 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13105 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13106 tp->tg3_flags3 |= TG3_FLG3_SHORT_DMA_BUG;
13107 else if (!(tp->tg3_flags3 & TG3_FLG3_5755_PLUS)) {
13108 tp->tg3_flags3 |= TG3_FLG3_4G_DMA_BNDRY_BUG;
13109 tp->tg3_flags3 |= TG3_FLG3_40BIT_DMA_LIMIT_BUG;
13112 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13113 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
13114 tp->tg3_flags3 |= TG3_FLG3_USE_JUMBO_BDFLAG;
13116 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
13117 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
13118 (tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG))
13119 tp->tg3_flags |= TG3_FLAG_JUMBO_CAPABLE;
13121 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
13124 tp->pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
13125 if (tp->pcie_cap != 0) {
13128 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
13130 pcie_set_readrq(tp->pdev, 4096);
13132 pci_read_config_word(tp->pdev,
13133 tp->pcie_cap + PCI_EXP_LNKCTL,
13135 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
13136 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13137 tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_2;
13138 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13139 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13140 tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
13141 tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
13142 tp->tg3_flags3 |= TG3_FLG3_CLKREQ_BUG;
13143 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
13144 tp->tg3_flags3 |= TG3_FLG3_L1PLLPD_EN;
13146 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
13147 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
13148 } else if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
13149 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
13150 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
13151 if (!tp->pcix_cap) {
13152 printk(KERN_ERR PFX "Cannot find PCI-X "
13153 "capability, aborting.\n");
13157 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
13158 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
13161 /* If we have an AMD 762 or VIA K8T800 chipset, write
13162 * reordering to the mailbox registers done by the host
13163 * controller can cause major troubles. We read back from
13164 * every mailbox register write to force the writes to be
13165 * posted to the chip in order.
13167 if (pci_dev_present(write_reorder_chipsets) &&
13168 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
13169 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
13171 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
13172 &tp->pci_cacheline_sz);
13173 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
13174 &tp->pci_lat_timer);
13175 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
13176 tp->pci_lat_timer < 64) {
13177 tp->pci_lat_timer = 64;
13178 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
13179 tp->pci_lat_timer);
13182 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
13183 /* 5700 BX chips need to have their TX producer index
13184 * mailboxes written twice to work around a bug.
13186 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
13188 /* If we are in PCI-X mode, enable register write workaround.
13190 * The workaround is to use indirect register accesses
13191 * for all chip writes not to mailbox registers.
13193 if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
13196 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
13198 /* The chip can have its power management PCI config
13199 * space registers clobbered due to this bug.
13200 * So explicitly force the chip into D0 here.
13202 pci_read_config_dword(tp->pdev,
13203 tp->pm_cap + PCI_PM_CTRL,
13205 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
13206 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
13207 pci_write_config_dword(tp->pdev,
13208 tp->pm_cap + PCI_PM_CTRL,
13211 /* Also, force SERR#/PERR# in PCI command. */
13212 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13213 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
13214 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13218 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
13219 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
13220 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
13221 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
13223 /* Chip-specific fixup from Broadcom driver */
13224 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
13225 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
13226 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
13227 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
13230 /* Default fast path register access methods */
13231 tp->read32 = tg3_read32;
13232 tp->write32 = tg3_write32;
13233 tp->read32_mbox = tg3_read32;
13234 tp->write32_mbox = tg3_write32;
13235 tp->write32_tx_mbox = tg3_write32;
13236 tp->write32_rx_mbox = tg3_write32;
13238 /* Various workaround register access methods */
13239 if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
13240 tp->write32 = tg3_write_indirect_reg32;
13241 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
13242 ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
13243 tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
13245 * Back to back register writes can cause problems on these
13246 * chips; the workaround is to read back all reg writes
13247 * except those to mailbox regs.
13249 * See tg3_write_indirect_reg32().
13251 tp->write32 = tg3_write_flush_reg32;
13254 if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
13255 (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
13256 tp->write32_tx_mbox = tg3_write32_tx_mbox;
13257 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
13258 tp->write32_rx_mbox = tg3_write_flush_reg32;
13261 if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
13262 tp->read32 = tg3_read_indirect_reg32;
13263 tp->write32 = tg3_write_indirect_reg32;
13264 tp->read32_mbox = tg3_read_indirect_mbox;
13265 tp->write32_mbox = tg3_write_indirect_mbox;
13266 tp->write32_tx_mbox = tg3_write_indirect_mbox;
13267 tp->write32_rx_mbox = tg3_write_indirect_mbox;
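/* For reference, the indirect accessors installed above go through PCI
 * config space instead of the memory-mapped BAR. A minimal sketch, assuming
 * the TG3PCI_REG_BASE_ADDR/TG3PCI_REG_DATA window used elsewhere in this
 * driver (locking with tp->indirect_lock omitted):
 *
 *	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
 *	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
 */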
13272 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13273 pci_cmd &= ~PCI_COMMAND_MEMORY;
13274 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13276 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13277 tp->read32_mbox = tg3_read32_mbox_5906;
13278 tp->write32_mbox = tg3_write32_mbox_5906;
13279 tp->write32_tx_mbox = tg3_write32_mbox_5906;
13280 tp->write32_rx_mbox = tg3_write32_mbox_5906;
13283 if (tp->write32 == tg3_write_indirect_reg32 ||
13284 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
13285 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13286 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
13287 tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
13289 /* Get eeprom hw config before calling tg3_set_power_state().
13290 * In particular, the TG3_FLG2_IS_NIC flag must be
13291 * determined before calling tg3_set_power_state() so that
13292 * we know whether or not to switch out of Vaux power.
13293 * When the flag is set, it means that GPIO1 is used for eeprom
13294 * write protect and also implies that it is a LOM where GPIOs
13295 * are not used to switch power.
13297 tg3_get_eeprom_hw_cfg(tp);
13299 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
13300 /* Allow reads and writes to the
13301 * APE register and memory space.
13303 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
13304 PCISTATE_ALLOW_APE_SHMEM_WR;
13305 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
13309 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13310 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13311 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13312 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13313 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13314 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
13315 tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT;
13317 /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
13318 * GPIO1 driven high will bring 5700's external PHY out of reset.
13319 * It is also used as eeprom write protect on LOMs.
13321 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
13322 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
13323 (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
13324 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
13325 GRC_LCLCTRL_GPIO_OUTPUT1);
13326 /* Unused GPIO3 must be driven as output on 5752 because there
13327 * are no pull-up resistors on unused GPIO pins.
13329 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
13330 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
13332 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13333 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13334 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
13335 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
13337 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
13338 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
13339 /* Turn off the debug UART. */
13340 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
13341 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
13342 /* Keep VMain power. */
13343 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
13344 GRC_LCLCTRL_GPIO_OUTPUT0;
13347 /* Force the chip into D0. */
13348 err = tg3_set_power_state(tp, PCI_D0);
13350 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
13351 pci_name(tp->pdev));
13355 /* Derive initial jumbo mode from MTU assigned in
13356 * ether_setup() via the alloc_etherdev() call
13358 if (tp->dev->mtu > ETH_DATA_LEN &&
13359 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
13360 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
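/* Worked example of the check above: ETH_DATA_LEN is 1500, so the jumbo RX
 * ring is enabled here only when the device already carries an MTU above
 * 1500 (e.g. 9000) and is not a 5780-class part, which does not use the
 * separate jumbo ring.
 */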
13362 /* Determine WakeOnLan speed to use. */
13363 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13364 tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
13365 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
13366 tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
13367 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
13369 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
13372 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13373 tp->tg3_flags3 |= TG3_FLG3_PHY_IS_FET;
13375 /* A few boards don't want the Ethernet@WireSpeed PHY feature */
13376 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
13377 ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
13378 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
13379 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
13380 (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) ||
13381 (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
13382 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
13384 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
13385 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
13386 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
13387 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
13388 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
13390 if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
13391 !(tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) &&
13392 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
13393 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
13394 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
13395 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765) {
13396 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13397 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13398 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13399 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
13400 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
13401 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
13402 tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
13403 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
13404 tp->tg3_flags2 |= TG3_FLG2_PHY_ADJUST_TRIM;
13406 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
13409 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
13410 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
13411 tp->phy_otp = tg3_read_otp_phycfg(tp);
13412 if (tp->phy_otp == 0)
13413 tp->phy_otp = TG3_OTP_DEFAULT;
13416 if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)
13417 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
13419 tp->mi_mode = MAC_MI_MODE_BASE;
13421 tp->coalesce_mode = 0;
13422 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
13423 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
13424 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
13426 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13427 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
13428 tp->tg3_flags3 |= TG3_FLG3_USE_PHYLIB;
13430 err = tg3_mdio_init(tp);
13434 /* Initialize data/descriptor byte/word swapping. */
13435 val = tr32(GRC_MODE);
13436 val &= GRC_MODE_HOST_STACKUP;
13437 tw32(GRC_MODE, val | tp->grc_mode);
13439 tg3_switch_clocks(tp);
13441 /* Clear this out for sanity. */
13442 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
13444 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
13446 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
13447 (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
13448 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
13450 if (chiprevid == CHIPREV_ID_5701_A0 ||
13451 chiprevid == CHIPREV_ID_5701_B0 ||
13452 chiprevid == CHIPREV_ID_5701_B2 ||
13453 chiprevid == CHIPREV_ID_5701_B5) {
13454 void __iomem *sram_base;
13456 /* Write some dummy words into the SRAM status block
13457 * area and see if they read back correctly. If the return
13458 * value is bad, force enable the PCIX workaround.
13460 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
13462 writel(0x00000000, sram_base);
13463 writel(0x00000000, sram_base + 4);
13464 writel(0xffffffff, sram_base + 4);
13465 if (readl(sram_base) != 0x00000000)
13466 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
13471 tg3_nvram_init(tp);
13473 grc_misc_cfg = tr32(GRC_MISC_CFG);
13474 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
13476 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
13477 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
13478 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
13479 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
13481 if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
13482 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
13483 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
13484 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
13485 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
13486 HOSTCC_MODE_CLRTICK_TXBD);
13488 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
13489 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13490 tp->misc_host_ctrl);
13493 /* Preserve the APE MAC_MODE bits */
13494 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
13495 tp->mac_mode = tr32(MAC_MODE) |
13496 MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
13498 tp->mac_mode = TG3_DEF_MAC_MODE;
13500 /* these are limited to 10/100 only */
13501 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
13502 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
13503 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
13504 tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
13505 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
13506 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
13507 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
13508 (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
13509 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
13510 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
13511 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
13512 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
13513 (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET))
13514 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
13516 err = tg3_phy_probe(tp);
13518 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
13519 pci_name(tp->pdev), err);
13520 /* ... but do not return immediately ... */
13524 tg3_read_partno(tp);
13525 tg3_read_fw_ver(tp);
13527 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
13528 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
13530 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
13531 tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
13533 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
13536 /* 5700 {AX,BX} chips have a broken status block link
13537 * change bit implementation, so we must use the
13538 * status register in those cases.
13540 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
13541 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
13543 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
13545 /* The led_ctrl is set during tg3_phy_probe; here we might
13546 * have to force the link status polling mechanism based
13547 * upon subsystem IDs.
13549 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
13550 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
13551 !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
13552 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
13553 TG3_FLAG_USE_LINKCHG_REG);
13556 /* For all SERDES we poll the MAC status register. */
13557 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
13558 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
13560 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
13562 tp->rx_offset = NET_IP_ALIGN;
13563 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
13564 (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
		tp->rx_offset = 0;
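/* NET_IP_ALIGN is 2 on most architectures: offsetting the received frame
 * by two bytes puts the IP header (which follows the 14-byte Ethernet
 * header) on a 4-byte boundary, e.g. at buffer offset 2 + 14 = 16. The
 * 5701 in PCI-X mode keeps rx_offset 0 above, presumably because that
 * chip/bus combination cannot DMA to unaligned addresses.
 */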
13567 tp->rx_std_max_post = TG3_RX_RING_SIZE;
13569 /* Increment the rx prod index on the rx std ring by at most
13570 * 8 for these chips to work around hw errata.
13572 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
13573 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
13574 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
13575 tp->rx_std_max_post = 8;
13577 if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND)
13578 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
13579 PCIE_PWR_MGMT_L1_THRESH_MSK;
13584 #ifdef CONFIG_SPARC
13585 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
13587 struct net_device *dev = tp->dev;
13588 struct pci_dev *pdev = tp->pdev;
13589 struct device_node *dp = pci_device_to_OF_node(pdev);
13590 const unsigned char *addr;
13593 addr = of_get_property(dp, "local-mac-address", &len);
13594 if (addr && len == 6) {
13595 memcpy(dev->dev_addr, addr, 6);
13596 memcpy(dev->perm_addr, dev->dev_addr, 6);
13602 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
13604 struct net_device *dev = tp->dev;
13606 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
13607 memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
13612 static int __devinit tg3_get_device_address(struct tg3 *tp)
13614 struct net_device *dev = tp->dev;
13615 u32 hi, lo, mac_offset;
13618 #ifdef CONFIG_SPARC
13619 if (!tg3_get_macaddr_sparc(tp))
13624 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
13625 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
13626 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
13628 if (tg3_nvram_lock(tp))
13629 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
13631 tg3_nvram_unlock(tp);
13632 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
13633 if (tr32(TG3_CPMU_STATUS) & TG3_CPMU_STATUS_PCIE_FUNC)
13635 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13638 /* First try to get it from MAC address mailbox. */
13639 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
13640 if ((hi >> 16) == 0x484b) {
13641 dev->dev_addr[0] = (hi >> 8) & 0xff;
13642 dev->dev_addr[1] = (hi >> 0) & 0xff;
13644 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
13645 dev->dev_addr[2] = (lo >> 24) & 0xff;
13646 dev->dev_addr[3] = (lo >> 16) & 0xff;
13647 dev->dev_addr[4] = (lo >> 8) & 0xff;
13648 dev->dev_addr[5] = (lo >> 0) & 0xff;
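/* Worked example of the unpacking above: the 0x484b ("HK") signature in the
 * top half of 'hi' marks a valid entry, so
 *	hi = 0x484b001b, lo = 0x21334455
 * yields the MAC address 00:1b:21:33:44:55.
 */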
13650 /* Some old bootcode may report a 0 MAC address in SRAM */
13651 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
13654 /* Next, try NVRAM. */
13655 if (!(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) &&
13656 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
13657 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
13658 memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
13659 memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
13661 /* Finally just fetch it out of the MAC control regs. */
13663 hi = tr32(MAC_ADDR_0_HIGH);
13664 lo = tr32(MAC_ADDR_0_LOW);
13666 dev->dev_addr[5] = lo & 0xff;
13667 dev->dev_addr[4] = (lo >> 8) & 0xff;
13668 dev->dev_addr[3] = (lo >> 16) & 0xff;
13669 dev->dev_addr[2] = (lo >> 24) & 0xff;
13670 dev->dev_addr[1] = hi & 0xff;
13671 dev->dev_addr[0] = (hi >> 8) & 0xff;
13675 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
13676 #ifdef CONFIG_SPARC
13677 if (!tg3_get_default_macaddr_sparc(tp))
13682 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
13686 #define BOUNDARY_SINGLE_CACHELINE 1
13687 #define BOUNDARY_MULTI_CACHELINE 2
13689 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
13691 int cacheline_size;
13695 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
13697 cacheline_size = 1024;
13699 cacheline_size = (int) byte * 4;
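/* PCI_CACHE_LINE_SIZE is expressed in 32-bit dwords, hence the "* 4" above:
 * e.g. a register value of 0x10 (16 dwords) means a 64-byte cache line. A
 * value of 0 typically means the register was never programmed, so the
 * conservative 1024-byte figure is assumed instead.
 */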
13701 /* On 5703 and later chips, the boundary bits have no effect. */
13704 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13705 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13706 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
13709 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
13710 goal = BOUNDARY_MULTI_CACHELINE;
13712 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
13713 goal = BOUNDARY_SINGLE_CACHELINE;
13719 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13720 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
13721 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
13728 /* PCI controllers on most RISC systems tend to disconnect
13729 * when a device tries to burst across a cache-line boundary.
13730 * Therefore, letting tg3 do so just wastes PCI bandwidth.
13732 * Unfortunately, for PCI-E there are only limited
13733 * write-side controls for this, and thus for reads
13734 * we will still get the disconnects. We'll also waste
13735 * these PCI cycles for both read and write for chips
13736 * other than 5700 and 5701 which do not implement the boundary bits.
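/* Worked example of the selection below: with a 64-byte cache line and
 * goal == BOUNDARY_SINGLE_CACHELINE, the generic (non-PCI-X, non-PCIe)
 * path ORs in DMA_RWCTRL_READ_BNDRY_64 | DMA_RWCTRL_WRITE_BNDRY_64, i.e.
 * DMA bursts are cut at every cache-line boundary.
 */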
13739 if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
13740 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
13741 switch (cacheline_size) {
13746 if (goal == BOUNDARY_SINGLE_CACHELINE) {
13747 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
13748 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
13750 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
13751 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
13756 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
13757 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
13761 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
13762 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
13765 } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
13766 switch (cacheline_size) {
13770 if (goal == BOUNDARY_SINGLE_CACHELINE) {
13771 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
13772 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
13778 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
13779 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
13783 switch (cacheline_size) {
13785 if (goal == BOUNDARY_SINGLE_CACHELINE) {
13786 val |= (DMA_RWCTRL_READ_BNDRY_16 |
13787 DMA_RWCTRL_WRITE_BNDRY_16);
13792 if (goal == BOUNDARY_SINGLE_CACHELINE) {
13793 val |= (DMA_RWCTRL_READ_BNDRY_32 |
13794 DMA_RWCTRL_WRITE_BNDRY_32);
13799 if (goal == BOUNDARY_SINGLE_CACHELINE) {
13800 val |= (DMA_RWCTRL_READ_BNDRY_64 |
13801 DMA_RWCTRL_WRITE_BNDRY_64);
13806 if (goal == BOUNDARY_SINGLE_CACHELINE) {
13807 val |= (DMA_RWCTRL_READ_BNDRY_128 |
13808 DMA_RWCTRL_WRITE_BNDRY_128);
13813 val |= (DMA_RWCTRL_READ_BNDRY_256 |
13814 DMA_RWCTRL_WRITE_BNDRY_256);
13817 val |= (DMA_RWCTRL_READ_BNDRY_512 |
13818 DMA_RWCTRL_WRITE_BNDRY_512);
13822 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
13823 DMA_RWCTRL_WRITE_BNDRY_1024);
13832 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
13834 struct tg3_internal_buffer_desc test_desc;
13835 u32 sram_dma_descs;
13838 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
13840 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
13841 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
13842 tw32(RDMAC_STATUS, 0);
13843 tw32(WDMAC_STATUS, 0);
13845 tw32(BUFMGR_MODE, 0);
13846 tw32(FTQ_RESET, 0);
13848 test_desc.addr_hi = ((u64) buf_dma) >> 32;
13849 test_desc.addr_lo = buf_dma & 0xffffffff;
13850 test_desc.nic_mbuf = 0x00002100;
13851 test_desc.len = size;
13854 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
13855 * the *second* time the tg3 driver was getting loaded after an
13858 * Broadcom tells me:
13859 * ...the DMA engine is connected to the GRC block and a DMA
13860 * reset may affect the GRC block in some unpredictable way...
13861 * The behavior of resets to individual blocks has not been tested.
13863 * Broadcom noted the GRC reset will also reset all sub-components.
13866 test_desc.cqid_sqid = (13 << 8) | 2;
13868 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
13871 test_desc.cqid_sqid = (16 << 8) | 7;
13873 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
13876 test_desc.flags = 0x00000005;
13878 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
13881 val = *(((u32 *)&test_desc) + i);
13882 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
13883 sram_dma_descs + (i * sizeof(u32)));
13884 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
13886 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
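/* The loop above pokes the descriptor into NIC SRAM through the PCI
 * config-space memory window; tg3_read_mem()/tg3_write_mem() used elsewhere
 * in this file wrap the same mechanism. Minimal sketch for one word
 * (indirect_lock and SRAM_USE_CONFIG handling omitted):
 *
 *	pci_write_config_dword(pdev, TG3PCI_MEM_WIN_BASE_ADDR, sram_off);
 *	pci_write_config_dword(pdev, TG3PCI_MEM_WIN_DATA, val);
 *	pci_write_config_dword(pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
 */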
13889 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
13891 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
13895 for (i = 0; i < 40; i++) {
13899 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
13901 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
13902 if ((val & 0xffff) == sram_dma_descs) {
13913 #define TEST_BUFFER_SIZE 0x2000
13915 static int __devinit tg3_test_dma(struct tg3 *tp)
13917 dma_addr_t buf_dma;
13918 u32 *buf, saved_dma_rwctrl;
13921 buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
13927 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
13928 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
13930 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
13932 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13933 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
13936 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
13937 /* DMA read watermark not used on PCIE */
13938 tp->dma_rwctrl |= 0x00180000;
13939 } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
13940 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
13941 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
13942 tp->dma_rwctrl |= 0x003f0000;
13944 tp->dma_rwctrl |= 0x003f000f;
13946 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
13947 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
13948 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
13949 u32 read_water = 0x7;
13951 /* If the 5704 is behind the EPB bridge, we can
13952 * do the less restrictive ONE_DMA workaround for
13953 * better performance.
13955 if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
13956 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
13957 tp->dma_rwctrl |= 0x8000;
13958 else if (ccval == 0x6 || ccval == 0x7)
13959 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
13961 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
13963 /* Set bit 23 to enable PCIX hw bug fix */
13965 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
13966 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
13968 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
13969 /* 5780 always in PCIX mode */
13970 tp->dma_rwctrl |= 0x00144000;
13971 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
13972 /* 5714 always in PCIX mode */
13973 tp->dma_rwctrl |= 0x00148000;
13975 tp->dma_rwctrl |= 0x001b000f;
13979 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
13980 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
13981 tp->dma_rwctrl &= 0xfffffff0;
13983 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13984 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
13985 /* Remove this if it causes problems for some boards. */
13986 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
13988 /* On 5700/5701 chips, we need to set this bit.
13989 * Otherwise the chip will issue cacheline transactions
13990 * to streamable DMA memory without all of the byte
13991 * enables turned on. This is an error on several
13992 * RISC PCI controllers, in particular sparc64.
13994 * On 5703/5704 chips, this bit has been reassigned
13995 * a different meaning. In particular, it is used
13996 * on those chips to enable a PCI-X workaround.
13998 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
14001 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14004 /* Unneeded, already done by tg3_get_invariants. */
14005 tg3_switch_clocks(tp);
14008 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14009 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
14012 /* It is best to perform DMA test with maximum write burst size
14013 * to expose the 5700/5701 write DMA bug.
14015 saved_dma_rwctrl = tp->dma_rwctrl;
14016 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14017 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14022 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
		p[i] = i;
14025 /* Send the buffer to the chip. */
14026 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
14028 printk(KERN_ERR "tg3_test_dma() Write of the buffer failed %d\n", ret);
14033 /* validate data reached card RAM correctly. */
14034 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
14036 tg3_read_mem(tp, 0x2100 + (i*4), &val);
14037 if (le32_to_cpu(val) != p[i]) {
14038 printk(KERN_ERR " tg3_test_dma() Card buffer corrupted on write! (%d != %d)\n", val, i);
14039 /* ret = -ENODEV here? */
14044 /* Now read it back. */
14045 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
14047 printk(KERN_ERR "tg3_test_dma() Read of the buffer failed %d\n", ret);
14053 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
		if (p[i] == i) continue;
14057 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
14058 DMA_RWCTRL_WRITE_BNDRY_16) {
14059 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14060 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
14061 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14064 printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
14070 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
14076 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
14077 DMA_RWCTRL_WRITE_BNDRY_16) {
14078 static struct pci_device_id dma_wait_state_chipsets[] = {
14079 { PCI_DEVICE(PCI_VENDOR_ID_APPLE,
14080 PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
14084 /* DMA test passed without adjusting DMA boundary,
14085 * now look for chipsets that are known to expose the
14086 * DMA bug without failing the test.
14088 if (pci_dev_present(dma_wait_state_chipsets)) {
14089 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14090 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
14093 /* Safe to use the calculated DMA boundary. */
14094 tp->dma_rwctrl = saved_dma_rwctrl;
14096 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14100 pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
14105 static void __devinit tg3_init_link_config(struct tg3 *tp)
14107 tp->link_config.advertising =
14108 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
14109 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
14110 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
14111 ADVERTISED_Autoneg | ADVERTISED_MII);
14112 tp->link_config.speed = SPEED_INVALID;
14113 tp->link_config.duplex = DUPLEX_INVALID;
14114 tp->link_config.autoneg = AUTONEG_ENABLE;
14115 tp->link_config.active_speed = SPEED_INVALID;
14116 tp->link_config.active_duplex = DUPLEX_INVALID;
14117 tp->link_config.phy_is_low_power = 0;
14118 tp->link_config.orig_speed = SPEED_INVALID;
14119 tp->link_config.orig_duplex = DUPLEX_INVALID;
14120 tp->link_config.orig_autoneg = AUTONEG_INVALID;
14123 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
14125 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14126 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
14127 tp->bufmgr_config.mbuf_read_dma_low_water =
14128 DEFAULT_MB_RDMA_LOW_WATER_5705;
14129 tp->bufmgr_config.mbuf_mac_rx_low_water =
14130 DEFAULT_MB_MACRX_LOW_WATER_57765;
14131 tp->bufmgr_config.mbuf_high_water =
14132 DEFAULT_MB_HIGH_WATER_57765;
14134 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14135 DEFAULT_MB_RDMA_LOW_WATER_5705;
14136 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14137 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
14138 tp->bufmgr_config.mbuf_high_water_jumbo =
14139 DEFAULT_MB_HIGH_WATER_JUMBO_57765;
14140 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
14141 tp->bufmgr_config.mbuf_read_dma_low_water =
14142 DEFAULT_MB_RDMA_LOW_WATER_5705;
14143 tp->bufmgr_config.mbuf_mac_rx_low_water =
14144 DEFAULT_MB_MACRX_LOW_WATER_5705;
14145 tp->bufmgr_config.mbuf_high_water =
14146 DEFAULT_MB_HIGH_WATER_5705;
14147 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14148 tp->bufmgr_config.mbuf_mac_rx_low_water =
14149 DEFAULT_MB_MACRX_LOW_WATER_5906;
14150 tp->bufmgr_config.mbuf_high_water =
14151 DEFAULT_MB_HIGH_WATER_5906;
14154 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14155 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
14156 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14157 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
14158 tp->bufmgr_config.mbuf_high_water_jumbo =
14159 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
14161 tp->bufmgr_config.mbuf_read_dma_low_water =
14162 DEFAULT_MB_RDMA_LOW_WATER;
14163 tp->bufmgr_config.mbuf_mac_rx_low_water =
14164 DEFAULT_MB_MACRX_LOW_WATER;
14165 tp->bufmgr_config.mbuf_high_water =
14166 DEFAULT_MB_HIGH_WATER;
14168 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14169 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
14170 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14171 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
14172 tp->bufmgr_config.mbuf_high_water_jumbo =
14173 DEFAULT_MB_HIGH_WATER_JUMBO;
14176 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
14177 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
14180 static char * __devinit tg3_phy_string(struct tg3 *tp)
14182 switch (tp->phy_id & PHY_ID_MASK) {
14183 case PHY_ID_BCM5400: return "5400";
14184 case PHY_ID_BCM5401: return "5401";
14185 case PHY_ID_BCM5411: return "5411";
14186 case PHY_ID_BCM5701: return "5701";
14187 case PHY_ID_BCM5703: return "5703";
14188 case PHY_ID_BCM5704: return "5704";
14189 case PHY_ID_BCM5705: return "5705";
14190 case PHY_ID_BCM5750: return "5750";
14191 case PHY_ID_BCM5752: return "5752";
14192 case PHY_ID_BCM5714: return "5714";
14193 case PHY_ID_BCM5780: return "5780";
14194 case PHY_ID_BCM5755: return "5755";
14195 case PHY_ID_BCM5787: return "5787";
14196 case PHY_ID_BCM5784: return "5784";
14197 case PHY_ID_BCM5756: return "5722/5756";
14198 case PHY_ID_BCM5906: return "5906";
14199 case PHY_ID_BCM5761: return "5761";
14200 case PHY_ID_BCM5718C: return "5718C";
14201 case PHY_ID_BCM5718S: return "5718S";
14202 case PHY_ID_BCM8002: return "8002/serdes";
14203 case 0: return "serdes";
14204 default: return "unknown";
14208 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
14210 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
14211 strcpy(str, "PCI Express");
14213 } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
14214 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
14216 strcpy(str, "PCIX:");
14218 if ((clock_ctrl == 7) ||
14219 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
14220 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
14221 strcat(str, "133MHz");
14222 else if (clock_ctrl == 0)
14223 strcat(str, "33MHz");
14224 else if (clock_ctrl == 2)
14225 strcat(str, "50MHz");
14226 else if (clock_ctrl == 4)
14227 strcat(str, "66MHz");
14228 else if (clock_ctrl == 6)
14229 strcat(str, "100MHz");
14231 strcpy(str, "PCI:");
14232 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
14233 strcat(str, "66MHz");
14235 strcat(str, "33MHz");
14237 if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
14238 strcat(str, ":32-bit");
14240 strcat(str, ":64-bit");
14244 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
14246 struct pci_dev *peer;
14247 unsigned int func, devnr = tp->pdev->devfn & ~7;
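/* devfn packs PCI_SLOT in the upper bits and PCI_FUNC in the low three
 * bits, so "devfn & ~7" is function 0 of this same slot. E.g. devfn 0x21
 * (slot 4, function 1) gives devnr 0x20, and the loop below probes
 * functions 0-7 of slot 4 looking for the twin port of a dual-port 5704.
 */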
14249 for (func = 0; func < 8; func++) {
14250 peer = pci_get_slot(tp->pdev->bus, devnr | func);
14251 if (peer && peer != tp->pdev)
14255 /* 5704 can be configured in single-port mode, set peer to
14256 * tp->pdev in that case.
14264 * We don't need to keep the refcount elevated; there's no way
14265 * to remove one half of this device without removing the other
14272 static void __devinit tg3_init_coal(struct tg3 *tp)
14274 struct ethtool_coalesce *ec = &tp->coal;
14276 memset(ec, 0, sizeof(*ec));
14277 ec->cmd = ETHTOOL_GCOALESCE;
14278 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
14279 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
14280 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
14281 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
14282 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
14283 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
14284 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
14285 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
14286 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
14288 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
14289 HOSTCC_MODE_CLRTICK_TXBD)) {
14290 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
14291 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
14292 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
14293 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
14296 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
14297 ec->rx_coalesce_usecs_irq = 0;
14298 ec->tx_coalesce_usecs_irq = 0;
14299 ec->stats_block_coalesce_usecs = 0;
14303 static const struct net_device_ops tg3_netdev_ops = {
14304 .ndo_open = tg3_open,
14305 .ndo_stop = tg3_close,
14306 .ndo_start_xmit = tg3_start_xmit,
14307 .ndo_get_stats = tg3_get_stats,
14308 .ndo_validate_addr = eth_validate_addr,
14309 .ndo_set_multicast_list = tg3_set_rx_mode,
14310 .ndo_set_mac_address = tg3_set_mac_addr,
14311 .ndo_do_ioctl = tg3_ioctl,
14312 .ndo_tx_timeout = tg3_tx_timeout,
14313 .ndo_change_mtu = tg3_change_mtu,
14314 #if TG3_VLAN_TAG_USED
14315 .ndo_vlan_rx_register = tg3_vlan_rx_register,
14317 #ifdef CONFIG_NET_POLL_CONTROLLER
14318 .ndo_poll_controller = tg3_poll_controller,
14322 static const struct net_device_ops tg3_netdev_ops_dma_bug = {
14323 .ndo_open = tg3_open,
14324 .ndo_stop = tg3_close,
14325 .ndo_start_xmit = tg3_start_xmit_dma_bug,
14326 .ndo_get_stats = tg3_get_stats,
14327 .ndo_validate_addr = eth_validate_addr,
14328 .ndo_set_multicast_list = tg3_set_rx_mode,
14329 .ndo_set_mac_address = tg3_set_mac_addr,
14330 .ndo_do_ioctl = tg3_ioctl,
14331 .ndo_tx_timeout = tg3_tx_timeout,
14332 .ndo_change_mtu = tg3_change_mtu,
14333 #if TG3_VLAN_TAG_USED
14334 .ndo_vlan_rx_register = tg3_vlan_rx_register,
14336 #ifdef CONFIG_NET_POLL_CONTROLLER
14337 .ndo_poll_controller = tg3_poll_controller,
14341 static int __devinit tg3_init_one(struct pci_dev *pdev,
14342 const struct pci_device_id *ent)
14344 static int tg3_version_printed = 0;
14345 struct net_device *dev;
14347 int i, err, pm_cap;
14348 u32 sndmbx, rcvmbx, intmbx;
14350 u64 dma_mask, persist_dma_mask;
14352 if (tg3_version_printed++ == 0)
14353 printk(KERN_INFO "%s", version);
14355 err = pci_enable_device(pdev);
14357 printk(KERN_ERR PFX "Cannot enable PCI device, "
14362 err = pci_request_regions(pdev, DRV_MODULE_NAME);
14364 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
14366 goto err_out_disable_pdev;
14369 pci_set_master(pdev);
14371 /* Find power-management capability. */
14372 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
14374 printk(KERN_ERR PFX "Cannot find Power Management capability, "
14377 goto err_out_free_res;
14380 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
14382 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
14384 goto err_out_free_res;
14387 SET_NETDEV_DEV(dev, &pdev->dev);
14389 #if TG3_VLAN_TAG_USED
14390 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
14393 tp = netdev_priv(dev);
14396 tp->pm_cap = pm_cap;
14397 tp->rx_mode = TG3_DEF_RX_MODE;
14398 tp->tx_mode = TG3_DEF_TX_MODE;
14401 tp->msg_enable = tg3_debug;
14403 tp->msg_enable = TG3_DEF_MSG_ENABLE;
14405 /* The word/byte swap controls here control register access byte
14406 * swapping. DMA data byte swapping is controlled in the GRC_MODE setting below.
14409 tp->misc_host_ctrl =
14410 MISC_HOST_CTRL_MASK_PCI_INT |
14411 MISC_HOST_CTRL_WORD_SWAP |
14412 MISC_HOST_CTRL_INDIR_ACCESS |
14413 MISC_HOST_CTRL_PCISTATE_RW;
14415 /* The NONFRM (non-frame) byte/word swap controls take effect
14416 * on descriptor entries, i.e. anything which isn't packet data.
14418 * The StrongARM chips on the board (one for tx, one for rx)
14419 * are running in big-endian mode.
14421 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
14422 GRC_MODE_WSWAP_NONFRM_DATA);
14423 #ifdef __BIG_ENDIAN
14424 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
14426 spin_lock_init(&tp->lock);
14427 spin_lock_init(&tp->indirect_lock);
14428 INIT_WORK(&tp->reset_task, tg3_reset_task);
14430 tp->regs = pci_ioremap_bar(pdev, BAR_0);
14432 printk(KERN_ERR PFX "Cannot map device registers, "
14435 goto err_out_free_dev;
14438 tg3_init_link_config(tp);
14440 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
14441 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
14443 dev->ethtool_ops = &tg3_ethtool_ops;
14444 dev->watchdog_timeo = TG3_TX_TIMEOUT;
14445 dev->irq = pdev->irq;
14447 err = tg3_get_invariants(tp);
14449 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
14451 goto err_out_iounmap;
14454 if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) &&
14455 tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
14456 dev->netdev_ops = &tg3_netdev_ops;
14458 dev->netdev_ops = &tg3_netdev_ops_dma_bug;
14461 /* The EPB bridge inside 5714, 5715, and 5780 and any
14462 * device behind the EPB cannot support DMA addresses > 40-bit.
14463 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
14464 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
14465 * do DMA address check in tg3_start_xmit().
14467 if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
14468 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
14469 else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
14470 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
14471 #ifdef CONFIG_HIGHMEM
14472 dma_mask = DMA_BIT_MASK(64);
14475 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
14477 /* Configure DMA attributes. */
14478 if (dma_mask > DMA_BIT_MASK(32)) {
14479 err = pci_set_dma_mask(pdev, dma_mask);
14481 dev->features |= NETIF_F_HIGHDMA;
14482 err = pci_set_consistent_dma_mask(pdev,
14485 printk(KERN_ERR PFX "Unable to obtain 64 bit "
14486 "DMA for consistent allocations\n");
14487 goto err_out_iounmap;
14491 if (err || dma_mask == DMA_BIT_MASK(32)) {
14492 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
14494 printk(KERN_ERR PFX "No usable DMA configuration, "
14496 goto err_out_iounmap;
14500 tg3_init_bufmgr_config(tp);
14502 /* Selectively allow TSO based on operating conditions */
14503 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
14504 (tp->fw_needed && !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)))
14505 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
14507 tp->tg3_flags2 &= ~(TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG);
14508 tp->fw_needed = NULL;
14511 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
14512 tp->fw_needed = FIRMWARE_TG3;
14514 /* TSO is on by default on chips that support hardware TSO.
14515 * Firmware TSO on older chips gives lower performance, so it
14516 * is off by default, but can be enabled using ethtool.
14518 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) &&
14519 (dev->features & NETIF_F_IP_CSUM))
14520 dev->features |= NETIF_F_TSO;
14522 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) ||
14523 (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3)) {
14524 if (dev->features & NETIF_F_IPV6_CSUM)
14525 dev->features |= NETIF_F_TSO6;
14526 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) ||
14527 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14528 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
14529 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
14530 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14531 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
14532 dev->features |= NETIF_F_TSO_ECN;
14535 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
14536 !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
14537 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
14538 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
14539 tp->rx_pending = 63;
14542 err = tg3_get_device_address(tp);
14544 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
14546 goto err_out_iounmap;
14549 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
14550 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
14551 if (!tp->aperegs) {
14552 printk(KERN_ERR PFX "Cannot map APE registers, "
14555 goto err_out_iounmap;
14558 tg3_ape_lock_init(tp);
14560 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
14561 tg3_read_dash_ver(tp);
14565 * Reset the chip in case a UNDI or EFI driver did not shut it down;
14566 * the DMA self test will enable WDMAC and we'll see (spurious)
14567 * pending DMA on the PCI bus at that point.
14569 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
14570 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
14571 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
14572 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
14575 err = tg3_test_dma(tp);
14577 printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
14578 goto err_out_apeunmap;
14581 /* flow control autonegotiation is default behavior */
14582 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
14583 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
14585 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
14586 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
14587 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
14588 for (i = 0; i < TG3_IRQ_MAX_VECS; i++) {
14589 struct tg3_napi *tnapi = &tp->napi[i];
14592 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
14594 tnapi->int_mbox = intmbx;
14600 tnapi->consmbox = rcvmbx;
14601 tnapi->prodmbox = sndmbx;
14604 tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
14605 netif_napi_add(dev, &tnapi->napi, tg3_poll_msix, 64);
14607 tnapi->coal_now = HOSTCC_MODE_NOW;
14608 netif_napi_add(dev, &tnapi->napi, tg3_poll, 64);
14611 if (!(tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX))
14615 * If we support MSIX, we'll be using RSS. If we're using
14616 * RSS, the first vector only handles link interrupts and the
14617 * remaining vectors handle rx and tx interrupts. Reuse the
14618 * mailbox values for the next iteration. The values we setup
14619 * above are still useful for the single vectored mode.
14634 pci_set_drvdata(pdev, dev);
14636 err = register_netdev(dev);
14638 printk(KERN_ERR PFX "Cannot register net device, "
14640 goto err_out_apeunmap;
14643 printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
14645 tp->board_part_number,
14646 tp->pci_chip_rev_id,
14647 tg3_bus_string(tp, str),
14650 if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
14651 struct phy_device *phydev;
14652 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
14654 "%s: attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
14655 tp->dev->name, phydev->drv->name,
14656 dev_name(&phydev->dev));
14659 "%s: attached PHY is %s (%s Ethernet) (WireSpeed[%d])\n",
14660 tp->dev->name, tg3_phy_string(tp),
14661 ((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" :
14662 ((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? "1000Base-SX" :
14663 "10/100/1000Base-T")),
14664 (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0);
14666 printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
14668 (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
14669 (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
14670 (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
14671 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
14672 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
14673 printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
14674 dev->name, tp->dma_rwctrl,
14675 (pdev->dma_mask == DMA_BIT_MASK(32)) ? 32 :
14676 (((u64) pdev->dma_mask == DMA_BIT_MASK(40)) ? 40 : 64));
14682 iounmap(tp->aperegs);
14683 tp->aperegs = NULL;
14696 pci_release_regions(pdev);
14698 err_out_disable_pdev:
14699 pci_disable_device(pdev);
14700 pci_set_drvdata(pdev, NULL);
14704 static void __devexit tg3_remove_one(struct pci_dev *pdev)
14706 struct net_device *dev = pci_get_drvdata(pdev);
14709 struct tg3 *tp = netdev_priv(dev);
14712 release_firmware(tp->fw);
14714 flush_scheduled_work();
14716 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
14721 unregister_netdev(dev);
14723 iounmap(tp->aperegs);
14724 tp->aperegs = NULL;
14731 pci_release_regions(pdev);
14732 pci_disable_device(pdev);
14733 pci_set_drvdata(pdev, NULL);
14737 static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
14739 struct net_device *dev = pci_get_drvdata(pdev);
14740 struct tg3 *tp = netdev_priv(dev);
14741 pci_power_t target_state;
14744 /* PCI register 4 needs to be saved whether netif_running() or not.
14745 * MSI address and data need to be saved if using MSI and netif_running().
14748 pci_save_state(pdev);
14750 if (!netif_running(dev))
14753 flush_scheduled_work();
14755 tg3_netif_stop(tp);
14757 del_timer_sync(&tp->timer);
14759 tg3_full_lock(tp, 1);
14760 tg3_disable_ints(tp);
14761 tg3_full_unlock(tp);
14763 netif_device_detach(dev);
14765 tg3_full_lock(tp, 0);
14766 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
14767 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
14768 tg3_full_unlock(tp);
14770 target_state = pdev->pm_cap ? pci_target_state(pdev) : PCI_D3hot;
14772 err = tg3_set_power_state(tp, target_state);
14776 tg3_full_lock(tp, 0);
14778 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
14779 err2 = tg3_restart_hw(tp, 1);
14783 tp->timer.expires = jiffies + tp->timer_offset;
14784 add_timer(&tp->timer);
14786 netif_device_attach(dev);
14787 tg3_netif_start(tp);
14790 tg3_full_unlock(tp);
14799 static int tg3_resume(struct pci_dev *pdev)
14801 struct net_device *dev = pci_get_drvdata(pdev);
14802 struct tg3 *tp = netdev_priv(dev);
14805 pci_restore_state(tp->pdev);
14807 if (!netif_running(dev))
14810 err = tg3_set_power_state(tp, PCI_D0);
14814 netif_device_attach(dev);
14816 tg3_full_lock(tp, 0);
14818 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
14819 err = tg3_restart_hw(tp, 1);
14823 tp->timer.expires = jiffies + tp->timer_offset;
14824 add_timer(&tp->timer);
14826 tg3_netif_start(tp);
14829 tg3_full_unlock(tp);
14837 static struct pci_driver tg3_driver = {
14838 .name = DRV_MODULE_NAME,
14839 .id_table = tg3_pci_tbl,
14840 .probe = tg3_init_one,
14841 .remove = __devexit_p(tg3_remove_one),
14842 .suspend = tg3_suspend,
14843 .resume = tg3_resume
14846 static int __init tg3_init(void)
14848 return pci_register_driver(&tg3_driver);
14851 static void __exit tg3_cleanup(void)
14853 pci_unregister_driver(&tg3_driver);
14856 module_init(tg3_init);
14857 module_exit(tg3_cleanup);