/* via-rhine.c: A Linux Ethernet device driver for VIA Rhine family chips. */
/*
	Written 1998-2001 by Donald Becker.

	Current Maintainer: Roger Luethi <rl@hellgate.ch>

	This software may be used and distributed according to the terms of
	the GNU General Public License (GPL), incorporated herein by reference.
	Drivers based on or derived from this code fall under the GPL and must
	retain the authorship, copyright and license notice. This file is not
	a complete program and may only be used when the entire operating
	system is licensed under the GPL.
	This driver is designed for the VIA VT86C100A Rhine-I.
	It also works with the Rhine-II (6102) and Rhine-III (6105/6105L/6105LOM
	and management NIC 6105M).

	The author may be reached as becker@scyld.com, or C/O
	Scyld Computing Corporation
	410 Severn Ave., Suite 210

	This driver contains some changes from the original Donald Becker
	version. He may or may not be interested in bug reports on this
	code. You can find his versions at:
	http://www.scyld.com/network/via-rhine.html
	[link no longer provides useful info -jgarzik]
*/
#define DRV_NAME	"via-rhine"
#define DRV_VERSION	"1.4.3"
#define DRV_RELDATE	"2007-03-06"
/* A few user-configurable values.
   These may be modified when a driver module is loaded. */

static int debug = 1;			/* 1 normal messages, 0 quiet .. 7 verbose. */
static int max_interrupt_work = 20;
/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
   Setting to > 1518 effectively disables this feature. */
#if defined(__alpha__) || defined(__arm__) || defined(__hppa__) \
	|| defined(CONFIG_SPARC) || defined(__ia64__) \
	|| defined(__sh__) || defined(__mips__)
static int rx_copybreak = 1518;
#else
static int rx_copybreak;
#endif
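
/*
 * Illustrative sketch (hypothetical helper, not used by the driver): the
 * receive path copies a frame into a freshly allocated skb only when it is
 * shorter than rx_copybreak, so the full-sized ring buffer can be reused.
 * See rhine_rx() below for the real implementation.
 */
static inline int example_should_copybreak(int pkt_len)
{
	return pkt_len < rx_copybreak;
}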
/* Work-around for broken BIOSes: they are unable to get the chip back out of
   power state D3 so PXE booting fails. bootparam(7): via-rhine.avoid_D3=1 */
static int avoid_D3;
/*
 * In case you are looking for 'options[]' or 'full_duplex[]', they
 * are gone. Use ethtool(8) instead.
 */
/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
   The Rhine has a 64 element 8390-like hash table. */
static const int multicast_filter_limit = 32;
/* Operational parameters that are set at compile time. */

/* Keep the ring sizes a power of two for compile efficiency.
   The compiler will convert <unsigned>'%'<2^N> into a bit mask.
   Making the Tx ring too large decreases the effectiveness of channel
   bonding and packet priority.
   There are no ill effects from too-large receive rings. */
#define TX_RING_SIZE	16
#define TX_QUEUE_LEN	10	/* Limit ring entries actually used. */
#ifdef CONFIG_VIA_RHINE_NAPI
#define RX_RING_SIZE	64
#else
#define RX_RING_SIZE	16
#endif
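
/*
 * Sketch of the power-of-two property mentioned above: with unsigned
 * arithmetic the ring-index wrap compiles down to a single mask
 * (hypothetical helper, not used by the driver):
 */
static inline unsigned int example_next_tx_slot(unsigned int cur_tx)
{
	return (cur_tx + 1) % TX_RING_SIZE;	/* same as (cur_tx + 1) & (TX_RING_SIZE - 1) */
}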
/* Operational parameters that usually are not changed. */

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT	(2*HZ)

#define PKT_BUF_SZ	1536	/* Size of each temporary Rx buffer. */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <linux/bitops.h>
#include <asm/processor.h>	/* Processor type for cache alignment. */
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <linux/dmi.h>
/* These identify the driver base version and may not be removed. */
static char version[] __devinitdata =
KERN_INFO DRV_NAME ".c:v1.10-LK" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker\n";
/* This driver was written to use PCI memory space. Some early versions
   of the Rhine may only work correctly with I/O space accesses. */
#ifdef CONFIG_VIA_RHINE_MMIO
#define USE_MMIO
#endif
MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("VIA Rhine PCI Fast Ethernet driver");
MODULE_LICENSE("GPL");

module_param(max_interrupt_work, int, 0);
module_param(debug, int, 0);
module_param(rx_copybreak, int, 0);
module_param(avoid_D3, bool, 0);
MODULE_PARM_DESC(max_interrupt_work, "VIA Rhine maximum events handled per interrupt");
MODULE_PARM_DESC(debug, "VIA Rhine debug level (0-7)");
MODULE_PARM_DESC(rx_copybreak, "VIA Rhine copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(avoid_D3, "Avoid power state D3 (work-around for broken BIOSes)");
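
/*
 * Usage sketch for the module parameters above (values for illustration
 * only):
 *
 *   modprobe via-rhine debug=3 rx_copybreak=1518 avoid_D3=1
 *
 * or at boot time, per bootparam(7): via-rhine.avoid_D3=1
 */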
/*
		Theory of Operation

I. Board Compatibility

This driver is designed for the VIA 86c100A Rhine-I PCI Fast Ethernet
adapter.

II. Board-specific settings

Boards with this chip are functional only in a bus-master PCI slot.

Many operational settings are loaded from the EEPROM to the Config word at
offset 0x78. For most of these settings, this driver assumes that they are
correct.
If this driver is compiled to use PCI memory space operations the EEPROM
must be configured to enable memory ops.
III. Driver operation

IIIa. Ring buffers

This driver uses two statically allocated fixed-size descriptor lists
formed into rings by a branch from the final descriptor to the beginning of
the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.

IIIb/c. Transmit/Receive Structure

This driver attempts to use a zero-copy receive and transmit scheme.

Alas, all data buffers are required to start on a 32 bit boundary, so
the driver must often copy transmit packets into bounce buffers.
The driver allocates full frame size skbuffs for the Rx ring buffers at
open() time and passes the skb->data field to the chip as receive data
buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
a fresh skbuff is allocated and the frame is copied to the new skbuff.
When the incoming frame is larger, the skbuff is passed directly up the
protocol stack. Buffers consumed this way are replaced by newly allocated
skbuffs in the last phase of rhine_rx().

The RX_COPYBREAK value is chosen to trade-off the memory wasted by
using a full-sized skbuff for small frames vs. the copying costs of larger
frames. New boards are typically used in generously configured machines
and the underfilled buffers have negligible impact compared to the benefit of
a single allocation size, so the default value of zero results in never
copying packets. When copying is done, the cost is usually mitigated by using
a combined copy/checksum routine. Copying also preloads the cache, which is
most useful with small frames.

Since the VIA chips are only able to transfer data to buffers on 32 bit
boundaries, the IP header at offset 14 in an ethernet frame isn't
longword aligned for further processing. Copying these unaligned buffers
has the beneficial effect of 16-byte aligning the IP header.
IIId. Synchronization

The driver runs as two independent, single-threaded flows of control. One
is the send-packet routine, which enforces single-threaded use by the
dev->priv->lock spinlock. The other thread is the interrupt handler, which
is single threaded by the hardware and interrupt handling software.

The send packet thread has partial control over the Tx ring. It locks the
dev->priv->lock whenever it's queuing a Tx packet. If the next slot in the ring
is not available it stops the transmit queue by calling netif_stop_queue.

The interrupt handler has exclusive control over the Rx ring and records stats
from the Tx ring. After reaping the stats, it marks the Tx queue entry as
empty by incrementing the dirty_tx mark. If at least half of the entries in
the Tx ring are available the transmit queue is woken up if it was stopped.
IV. Notes

IVb. References

Preliminary VT86C100A manual from http://www.via.com.tw/
http://www.scyld.com/expert/100mbps.html
http://www.scyld.com/expert/NWay.html
ftp://ftp.via.com.tw/public/lan/Products/NIC/VT86C100A/Datasheet/VT86C100A03.pdf
ftp://ftp.via.com.tw/public/lan/Products/NIC/VT6102/Datasheet/VT6102_021.PDF
IVc. Errata

The VT86C100A manual is not a reliable source of information.
The 3043 chip does not handle unaligned transmit or receive buffers, resulting
in significant performance degradation for bounce buffer copies on transmit
and unaligned IP headers on receive.
The chip does not pad to minimum transmit length.

*/
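
/*
 * A minimal sketch of the IIId locking scheme above, with hypothetical
 * names (the real fields live in struct rhine_private below): the xmit
 * path advances cur_tx under the lock, the interrupt handler advances
 * dirty_tx, and the difference tells both sides how full the ring is.
 */
struct example_tx_sync {
	spinlock_t lock;
	unsigned int cur_tx;	/* next entry the xmit path will fill */
	unsigned int dirty_tx;	/* next entry the ISR will reap */
};

static inline int example_tx_ring_full(struct example_tx_sync *s)
{
	int full;

	spin_lock_irq(&s->lock);
	full = (s->cur_tx - s->dirty_tx) >= TX_QUEUE_LEN;
	spin_unlock_irq(&s->lock);
	return full;
}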
/* This table drives the PCI probe routines. It's mostly boilerplate in all
   of the drivers, and will likely be provided by some future kernel.
   Note the matching code -- the first table entry matches all 56** cards but
   the second matches only the 1234 card.
*/
enum rhine_revs {
	VT86C100A	= 0x00,
	VTunknown0	= 0x20,
	VT6102		= 0x40,
	VT8231		= 0x50,	/* Integrated MAC */
	VT8233		= 0x60,	/* Integrated MAC */
	VT8235		= 0x74,	/* Integrated MAC */
	VT8237		= 0x78,	/* Integrated MAC */
	VT6105		= 0x80,
	VT6105_B0	= 0x83,
	VT6105M		= 0x90,	/* Management adapter */
};
enum rhine_quirks {
	rqWOL		= 0x0001,	/* Wake-On-LAN support */
	rqForceReset	= 0x0002,
	rq6patterns	= 0x0040,	/* 6 instead of 4 patterns for WOL */
	rqStatusWBRace	= 0x0080,	/* Tx Status Writeback Error possible */
	rqRhineI	= 0x0100,	/* See comment below */
};
/*
 * rqRhineI: VT86C100A (aka Rhine-I) uses different bits to enable
 * MMIO as well as for the collision counter and the Tx FIFO underflow
 * indicator. In addition, Tx and Rx buffers need to be 4-byte aligned.
 */
/* Beware of PCI posted writes */
#define IOSYNC	do { ioread8(ioaddr + StationAddr); } while (0)
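
/*
 * Sketch of the posted-write hazard (hypothetical helper, not used by the
 * driver): a PCI write may sit buffered in a bridge until a read from the
 * same device forces it out, which is exactly what IOSYNC's dummy read does.
 */
static inline void example_flushed_write8(void __iomem *ioaddr, int reg, u8 val)
{
	iowrite8(val, ioaddr + reg);
	ioread8(ioaddr + reg);	/* read-back flushes the posted write */
}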
static const struct pci_device_id rhine_pci_tbl[] = {
	{ 0x1106, 0x3043, PCI_ANY_ID, PCI_ANY_ID, },	/* VT86C100A */
	{ 0x1106, 0x3065, PCI_ANY_ID, PCI_ANY_ID, },	/* VT6102 */
	{ 0x1106, 0x3106, PCI_ANY_ID, PCI_ANY_ID, },	/* 6105{,L,LOM} */
	{ 0x1106, 0x3053, PCI_ANY_ID, PCI_ANY_ID, },	/* VT6105M */
	{ }	/* terminate list */
};
MODULE_DEVICE_TABLE(pci, rhine_pci_tbl);
/* Offsets to the device registers. */
enum register_offsets {
	StationAddr=0x00, RxConfig=0x06, TxConfig=0x07, ChipCmd=0x08,
	ChipCmd1=0x09,
	IntrStatus=0x0C, IntrEnable=0x0E,
	MulticastFilter0=0x10, MulticastFilter1=0x14,
	RxRingPtr=0x18, TxRingPtr=0x1C, GFIFOTest=0x54,
	MIIPhyAddr=0x6C, MIIStatus=0x6D, PCIBusConfig=0x6E,
	MIICmd=0x70, MIIRegAddr=0x71, MIIData=0x72, MACRegEEcsr=0x74,
	ConfigA=0x78, ConfigB=0x79, ConfigC=0x7A, ConfigD=0x7B,
	RxMissed=0x7C, RxCRCErrs=0x7E, MiscCmd=0x81,
	StickyHW=0x83, IntrStatus2=0x84,
	WOLcrSet=0xA0, PwcfgSet=0xA1, WOLcgSet=0xA3, WOLcrClr=0xA4,
	WOLcrClr1=0xA6, WOLcgClr=0xA7,
	PwrcsrSet=0xA8, PwrcsrSet1=0xA9, PwrcsrClr=0xAC, PwrcsrClr1=0xAD,
};
/* Bits in ConfigD */
enum backoff_bits {
	BackOptional=0x01, BackModify=0x02,
	BackCaptureEffect=0x04, BackRandom=0x08
};
#ifdef USE_MMIO
/* Registers we check to verify that MMIO and PIO accesses read the same values. */
static const int mmio_verify_registers[] = {
	RxConfig, TxConfig, IntrEnable, ConfigA, ConfigB, ConfigC, ConfigD,
	0
};
#endif
/* Bits in the interrupt status/mask registers. */
enum intr_status_bits {
	IntrRxDone=0x0001, IntrRxErr=0x0004, IntrRxEmpty=0x0020,
	IntrTxDone=0x0002, IntrTxError=0x0008, IntrTxUnderrun=0x0210,
	IntrPCIErr=0x0040,
	IntrStatsMax=0x0080, IntrRxEarly=0x0100,
	IntrRxOverflow=0x0400, IntrRxDropped=0x0800, IntrRxNoBuf=0x1000,
	IntrTxAborted=0x2000, IntrLinkChange=0x4000,
	IntrRxWakeUp=0x8000,
	IntrNormalSummary=0x0003, IntrAbnormalSummary=0xC260,
	IntrTxDescRace=0x080000,	/* mapped from IntrStatus2 */
	IntrTxErrSummary=0x082218,
};
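
/*
 * Sketch: get_intr_status() below folds IntrStatus2 into bits 16-23 of one
 * word, so a composite mask like IntrTxErrSummary can test both registers
 * at once (hypothetical helper, not used by the driver).
 */
static inline int example_tx_trouble(u32 intr_status)
{
	return (intr_status & IntrTxErrSummary) != 0;
}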
/* Bits in WOLcrSet/WOLcrClr and PwrcsrSet/PwrcsrClr */
enum wol_bits {
	WOLucast	= 0x10,
	WOLmagic	= 0x20,
	WOLbmcast	= 0x30,
	WOLlnkon	= 0x40,
	WOLlnkoff	= 0x80,
};
/* The Rx and Tx buffer descriptors. */
struct rx_desc {
	s32 rx_status;
	u32 desc_length;	/* Chain flag, Buffer/frame length */
	u32 addr;
	u32 next_desc;
};
struct tx_desc {
	s32 tx_status;
	u32 desc_length;	/* Chain flag, Tx Config, Frame length */
	u32 addr;
	u32 next_desc;
};

/* Initial value for tx_desc.desc_length, Buffer size goes to bits 0-10 */
#define TXDESC		0x00e08000
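
/*
 * Sketch: the chain flag and Tx config bits of TXDESC are combined with
 * the frame length in bits 0-10 (hypothetical helper; rhine_start_tx()
 * below does the real thing, including the ETH_ZLEN padding rule).
 */
static inline u32 example_tx_desc_length(unsigned int len)
{
	return TXDESC | (len & 0x7FF);	/* length occupies bits 0-10 */
}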
enum rx_status_bits {
	RxOK=0x8000, RxWholePkt=0x0300, RxErr=0x008F
};

/* Bits in *_desc.*_status */
enum desc_status_bits {
	DescOwn=0x80000000
};
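
/*
 * Sketch of the ownership handshake (assumes the descriptor layout above):
 * the driver hands a descriptor to the chip by setting DescOwn last; the
 * chip clears the bit when it is done with the descriptor.
 */
static inline int example_rx_desc_done(const struct rx_desc *desc)
{
	return !(le32_to_cpu(desc->rx_status) & DescOwn);
}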
/* Bits in ChipCmd. */
enum chip_cmd_bits {
	CmdInit=0x01, CmdStart=0x02, CmdStop=0x04, CmdRxOn=0x08,
	CmdTxOn=0x10, Cmd1TxDemand=0x20, CmdRxDemand=0x40,
	Cmd1EarlyRx=0x01, Cmd1EarlyTx=0x02, Cmd1FDuplex=0x04,
	Cmd1NoTxPoll=0x08, Cmd1Reset=0x80,
};
struct rhine_private {
	/* Descriptor rings */
	struct rx_desc *rx_ring;
	struct tx_desc *tx_ring;
	dma_addr_t rx_ring_dma;
	dma_addr_t tx_ring_dma;

	/* The addresses of receive-in-place skbuffs. */
	struct sk_buff *rx_skbuff[RX_RING_SIZE];
	dma_addr_t rx_skbuff_dma[RX_RING_SIZE];

	/* The saved address of a sent-in-place packet/buffer, for later free(). */
	struct sk_buff *tx_skbuff[TX_RING_SIZE];
	dma_addr_t tx_skbuff_dma[TX_RING_SIZE];

	/* Tx bounce buffers (Rhine-I only) */
	unsigned char *tx_buf[TX_RING_SIZE];
	unsigned char *tx_bufs;
	dma_addr_t tx_bufs_dma;

	struct pci_dev *pdev;
	long pioaddr;
	struct net_device *dev;
	struct napi_struct napi;
	struct net_device_stats stats;
	spinlock_t lock;

	/* Frequently used values: keep some adjacent for cache effect. */
	u32 quirks;
	struct rx_desc *rx_head_desc;
	unsigned int cur_rx, dirty_rx;	/* Producer/consumer ring indices */
	unsigned int cur_tx, dirty_tx;
	unsigned int rx_buf_sz;		/* Based on MTU+slack. */
	u8 wolopts;

	u8 tx_thresh, rx_thresh;

	struct mii_if_info mii_if;
	void __iomem *base;
};
static int mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
static int rhine_open(struct net_device *dev);
static void rhine_tx_timeout(struct net_device *dev);
static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev);
static irqreturn_t rhine_interrupt(int irq, void *dev_instance);
static void rhine_tx(struct net_device *dev);
static int rhine_rx(struct net_device *dev, int limit);
static void rhine_error(struct net_device *dev, int intr_status);
static void rhine_set_rx_mode(struct net_device *dev);
static struct net_device_stats *rhine_get_stats(struct net_device *dev);
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static const struct ethtool_ops netdev_ethtool_ops;
static int rhine_close(struct net_device *dev);
static void rhine_shutdown (struct pci_dev *pdev);
#define RHINE_WAIT_FOR(condition) do {					\
	int i = 1024;							\
	while (!(condition) && --i)					\
		;							\
	if (debug > 1 && i < 512)					\
		printk(KERN_INFO "%s: %4d cycles used @ %s:%d\n",	\
		       DRV_NAME, 1024-i, __func__, __LINE__);		\
} while (0)
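
/*
 * Usage sketch for the macro above (mirrors rhine_chip_reset() below):
 * spin briefly until a self-clearing command bit drops.
 */
static inline void example_wait_reset_done(void __iomem *ioaddr)
{
	RHINE_WAIT_FOR(!(ioread8(ioaddr + ChipCmd1) & Cmd1Reset));
}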
static inline u32 get_intr_status(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	u32 intr_status;

	intr_status = ioread16(ioaddr + IntrStatus);
	/* On Rhine-II, Bit 3 indicates Tx descriptor write-back race. */
	if (rp->quirks & rqStatusWBRace)
		intr_status |= ioread8(ioaddr + IntrStatus2) << 16;
	return intr_status;
}
/*
 * Get power related registers into sane state.
 * Notify user about past WOL event.
 */
static void rhine_power_init(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	u16 wolstat;

	if (rp->quirks & rqWOL) {
		/* Make sure chip is in power state D0 */
		iowrite8(ioread8(ioaddr + StickyHW) & 0xFC, ioaddr + StickyHW);

		/* Disable "force PME-enable" */
		iowrite8(0x80, ioaddr + WOLcgClr);

		/* Clear power-event config bits (WOL) */
		iowrite8(0xFF, ioaddr + WOLcrClr);
		/* More recent cards can manage two additional patterns */
		if (rp->quirks & rq6patterns)
			iowrite8(0x03, ioaddr + WOLcrClr1);

		/* Save power-event status bits */
		wolstat = ioread8(ioaddr + PwrcsrSet);
		if (rp->quirks & rq6patterns)
			wolstat |= (ioread8(ioaddr + PwrcsrSet1) & 0x03) << 8;

		/* Clear power-event status bits */
		iowrite8(0xFF, ioaddr + PwrcsrClr);
		if (rp->quirks & rq6patterns)
			iowrite8(0x03, ioaddr + PwrcsrClr1);

		if (wolstat) {
			char *reason;
			switch (wolstat) {
			case WOLmagic:
				reason = "Magic packet";
				break;
			case WOLlnkon:
				reason = "Link went up";
				break;
			case WOLlnkoff:
				reason = "Link went down";
				break;
			case WOLucast:
				reason = "Unicast packet";
				break;
			case WOLbmcast:
				reason = "Multicast/broadcast packet";
				break;
			default:
				reason = "Unknown";
			}
			printk(KERN_INFO "%s: Woke system up. Reason: %s.\n",
			       DRV_NAME, reason);
		}
	}
}
static void rhine_chip_reset(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	iowrite8(Cmd1Reset, ioaddr + ChipCmd1);
	IOSYNC;

	if (ioread8(ioaddr + ChipCmd1) & Cmd1Reset) {
		printk(KERN_INFO "%s: Reset not complete yet. "
		       "Trying harder.\n", DRV_NAME);

		/* Force reset */
		if (rp->quirks & rqForceReset)
			iowrite8(0x40, ioaddr + MiscCmd);

		/* Reset can take somewhat longer (rare) */
		RHINE_WAIT_FOR(!(ioread8(ioaddr + ChipCmd1) & Cmd1Reset));
	}

	if (debug > 1)
		printk(KERN_INFO "%s: Reset %s.\n", dev->name,
		       (ioread8(ioaddr + ChipCmd1) & Cmd1Reset) ?
		       "failed" : "succeeded");
}
static void enable_mmio(long pioaddr, u32 quirks)
{
	int n;

	if (quirks & rqRhineI) {
		/* More recent docs say that this bit is reserved ... */
		n = inb(pioaddr + ConfigA) | 0x20;
		outb(n, pioaddr + ConfigA);
	} else {
		n = inb(pioaddr + ConfigD) | 0x80;
		outb(n, pioaddr + ConfigD);
	}
}
/*
 * Loads bytes 0x00-0x05, 0x6E-0x6F, 0x78-0x7B from EEPROM
 * (plus 0x6C for Rhine-I/II)
 */
static void __devinit rhine_reload_eeprom(long pioaddr, struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	outb(0x20, pioaddr + MACRegEEcsr);
	RHINE_WAIT_FOR(!(inb(pioaddr + MACRegEEcsr) & 0x20));

#ifdef USE_MMIO
	/*
	 * Reloading from EEPROM overwrites ConfigA-D, so we must re-enable
	 * MMIO. If reloading EEPROM was done first this could be avoided, but
	 * it is not known if that still works with the "win98-reboot" problem.
	 */
	enable_mmio(pioaddr, rp->quirks);
#endif

	/* Turn off EEPROM-controlled wake-up (magic packet) */
	if (rp->quirks & rqWOL)
		iowrite8(ioread8(ioaddr + ConfigA) & 0xFC, ioaddr + ConfigA);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void rhine_poll(struct net_device *dev)
{
	disable_irq(dev->irq);
	rhine_interrupt(dev->irq, (void *)dev);
	enable_irq(dev->irq);
}
#endif
#ifdef CONFIG_VIA_RHINE_NAPI
static int rhine_napipoll(struct napi_struct *napi, int budget)
{
	struct rhine_private *rp = container_of(napi, struct rhine_private, napi);
	struct net_device *dev = rp->dev;
	void __iomem *ioaddr = rp->base;
	int work_done;

	work_done = rhine_rx(dev, budget);

	if (work_done < budget) {
		netif_rx_complete(dev, napi);

		iowrite16(IntrRxDone | IntrRxErr | IntrRxEmpty| IntrRxOverflow |
			  IntrRxDropped | IntrRxNoBuf | IntrTxAborted |
			  IntrTxDone | IntrTxError | IntrTxUnderrun |
			  IntrPCIErr | IntrStatsMax | IntrLinkChange,
			  ioaddr + IntrEnable);
	}
	return work_done;
}
#endif
static void rhine_hw_init(struct net_device *dev, long pioaddr)
{
	struct rhine_private *rp = netdev_priv(dev);

	/* Reset the chip to erase previous misconfiguration. */
	rhine_chip_reset(dev);

	/* Rhine-I needs extra time to recuperate before EEPROM reload */
	if (rp->quirks & rqRhineI)
		msleep(5);

	/* Reload EEPROM controlled bytes cleared by soft reset */
	rhine_reload_eeprom(pioaddr, dev);
}
static int __devinit rhine_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct rhine_private *rp;
	int i, rc;
	u32 quirks;
	long pioaddr;
	long memaddr;
	void __iomem *ioaddr;
	int io_size, phy_id;
	const char *name;
#ifdef USE_MMIO
	int bar = 1;
#else
	int bar = 0;
#endif

	/* when built into the kernel, we only print version if device is found */
#ifndef MODULE
	static int printed_version;
	if (!printed_version++)
		printk(version);
#endif
	/* card capabilities, actually only needed for quirks */
	quirks = 0;
	io_size = 256;
	phy_id = 0;
	name = "Rhine";
	if (pdev->revision < VTunknown0) {
		quirks = rqRhineI;
		io_size = 128;
	}
	else if (pdev->revision >= VT6102) {
		quirks = rqWOL | rqForceReset;
		if (pdev->revision < VT6105) {
			name = "Rhine II";
			quirks |= rqStatusWBRace;	/* Rhine-II exclusive */
		}
		else {
			phy_id = 1;	/* Integrated PHY, phy_id fixed to 1 */
			if (pdev->revision >= VT6105_B0)
				quirks |= rq6patterns;
			if (pdev->revision < VT6105M)
				name = "Rhine III";
			else
				name = "Rhine III (Management Adapter)";
		}
	}
	rc = pci_enable_device(pdev);
	if (rc)
		goto err_out;

	/* this should always be supported */
	rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
	if (rc) {
		printk(KERN_ERR "32-bit PCI DMA addresses not supported by "
		       "the card!?\n");
		goto err_out;
	}

	/* sanity check */
	if ((pci_resource_len(pdev, 0) < io_size) ||
	    (pci_resource_len(pdev, 1) < io_size)) {
		rc = -EIO;
		printk(KERN_ERR "Insufficient PCI resources, aborting\n");
		goto err_out;
	}

	pioaddr = pci_resource_start(pdev, 0);
	memaddr = pci_resource_start(pdev, 1);

	pci_set_master(pdev);
	dev = alloc_etherdev(sizeof(struct rhine_private));
	if (!dev) {
		rc = -ENOMEM;
		printk(KERN_ERR "alloc_etherdev failed\n");
		goto err_out;
	}
	SET_NETDEV_DEV(dev, &pdev->dev);

	rp = netdev_priv(dev);
	rp->dev = dev;
	rp->quirks = quirks;
	rp->pioaddr = pioaddr;
	rp->pdev = pdev;

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc)
		goto err_out_free_netdev;
	ioaddr = pci_iomap(pdev, bar, io_size);
	if (!ioaddr) {
		rc = -EIO;
		printk(KERN_ERR "ioremap failed for device %s, region 0x%X "
		       "@ 0x%lX\n", pci_name(pdev), io_size, memaddr);
		goto err_out_free_res;
	}

#ifdef USE_MMIO
	enable_mmio(pioaddr, quirks);

	/* Check that selected MMIO registers match the PIO ones */
	i = 0;
	while (mmio_verify_registers[i]) {
		int reg = mmio_verify_registers[i++];
		unsigned char a = inb(pioaddr+reg);
		unsigned char b = readb(ioaddr+reg);
		if (a != b) {
			rc = -EIO;
			printk(KERN_ERR "MMIO do not match PIO [%02x] "
			       "(%02x != %02x)\n", reg, a, b);
			goto err_out_unmap;
		}
	}
#endif /* USE_MMIO */
	dev->base_addr = (unsigned long)ioaddr;
	rp->base = ioaddr;

	/* Get chip registers into a sane state */
	rhine_power_init(dev);
	rhine_hw_init(dev, pioaddr);

	for (i = 0; i < 6; i++)
		dev->dev_addr[i] = ioread8(ioaddr + StationAddr + i);
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);

	if (!is_valid_ether_addr(dev->perm_addr)) {
		rc = -EIO;
		printk(KERN_ERR "Invalid MAC address\n");
		goto err_out_unmap;
	}

	/* For Rhine-I/II, phy_id is loaded from EEPROM */
	if (!phy_id)
		phy_id = ioread8(ioaddr + 0x6C);
	dev->irq = pdev->irq;

	spin_lock_init(&rp->lock);
	rp->mii_if.dev = dev;
	rp->mii_if.mdio_read = mdio_read;
	rp->mii_if.mdio_write = mdio_write;
	rp->mii_if.phy_id_mask = 0x1f;
	rp->mii_if.reg_num_mask = 0x1f;
	/* The chip-specific entries in the device structure. */
	dev->open = rhine_open;
	dev->hard_start_xmit = rhine_start_tx;
	dev->stop = rhine_close;
	dev->get_stats = rhine_get_stats;
	dev->set_multicast_list = rhine_set_rx_mode;
	dev->do_ioctl = netdev_ioctl;
	dev->ethtool_ops = &netdev_ethtool_ops;
	dev->tx_timeout = rhine_tx_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = rhine_poll;
#endif
#ifdef CONFIG_VIA_RHINE_NAPI
	netif_napi_add(dev, &rp->napi, rhine_napipoll, 64);
#endif

	if (rp->quirks & rqRhineI)
		dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM;
	/* dev->name not defined before register_netdev()! */
	rc = register_netdev(dev);
	if (rc)
		goto err_out_unmap;

	printk(KERN_INFO "%s: VIA %s at 0x%lx, ",
	       dev->name, name,
#ifdef USE_MMIO
	       memaddr
#else
	       (long)ioaddr
#endif
	       );

	for (i = 0; i < 5; i++)
		printk("%2.2x:", dev->dev_addr[i]);
	printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], pdev->irq);

	pci_set_drvdata(pdev, dev);
	{
		u16 mii_cmd;
		int mii_status = mdio_read(dev, phy_id, 1);
		mii_cmd = mdio_read(dev, phy_id, MII_BMCR) & ~BMCR_ISOLATE;
		mdio_write(dev, phy_id, MII_BMCR, mii_cmd);
		if (mii_status != 0xffff && mii_status != 0x0000) {
			rp->mii_if.advertising = mdio_read(dev, phy_id, 4);
			printk(KERN_INFO "%s: MII PHY found at address "
			       "%d, status 0x%4.4x advertising %4.4x "
			       "Link %4.4x.\n", dev->name, phy_id,
			       mii_status, rp->mii_if.advertising,
			       mdio_read(dev, phy_id, 5));

			/* set IFF_RUNNING */
			if (mii_status & BMSR_LSTATUS)
				netif_carrier_on(dev);
			else
				netif_carrier_off(dev);
		}
	}
	rp->mii_if.phy_id = phy_id;
	if (debug > 1 && avoid_D3)
		printk(KERN_INFO "%s: No D3 power state at shutdown.\n",
		       dev->name);

	return 0;
err_out_unmap:
	pci_iounmap(pdev, ioaddr);
err_out_free_res:
	pci_release_regions(pdev);
err_out_free_netdev:
	free_netdev(dev);
err_out:
	return rc;
}
static int alloc_ring(struct net_device* dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void *ring;
	dma_addr_t ring_dma;

	ring = pci_alloc_consistent(rp->pdev,
				    RX_RING_SIZE * sizeof(struct rx_desc) +
				    TX_RING_SIZE * sizeof(struct tx_desc),
				    &ring_dma);
	if (!ring) {
		printk(KERN_ERR "Could not allocate DMA memory.\n");
		return -ENOMEM;
	}
	if (rp->quirks & rqRhineI) {
		rp->tx_bufs = pci_alloc_consistent(rp->pdev,
						   PKT_BUF_SZ * TX_RING_SIZE,
						   &rp->tx_bufs_dma);
		if (rp->tx_bufs == NULL) {
			pci_free_consistent(rp->pdev,
					    RX_RING_SIZE * sizeof(struct rx_desc) +
					    TX_RING_SIZE * sizeof(struct tx_desc),
					    ring, ring_dma);
			return -ENOMEM;
		}
	}

	rp->rx_ring = ring;
	rp->tx_ring = ring + RX_RING_SIZE * sizeof(struct rx_desc);
	rp->rx_ring_dma = ring_dma;
	rp->tx_ring_dma = ring_dma + RX_RING_SIZE * sizeof(struct rx_desc);

	return 0;
}
static void free_ring(struct net_device* dev)
{
	struct rhine_private *rp = netdev_priv(dev);

	pci_free_consistent(rp->pdev,
			    RX_RING_SIZE * sizeof(struct rx_desc) +
			    TX_RING_SIZE * sizeof(struct tx_desc),
			    rp->rx_ring, rp->rx_ring_dma);
	rp->tx_ring = NULL;

	if (rp->tx_bufs)
		pci_free_consistent(rp->pdev, PKT_BUF_SZ * TX_RING_SIZE,
				    rp->tx_bufs, rp->tx_bufs_dma);

	rp->tx_bufs = NULL;
}
static void alloc_rbufs(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	dma_addr_t next;
	int i;

	rp->dirty_rx = rp->cur_rx = 0;

	rp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
	rp->rx_head_desc = &rp->rx_ring[0];
	next = rp->rx_ring_dma;

	/* Init the ring entries */
	for (i = 0; i < RX_RING_SIZE; i++) {
		rp->rx_ring[i].rx_status = 0;
		rp->rx_ring[i].desc_length = cpu_to_le32(rp->rx_buf_sz);
		next += sizeof(struct rx_desc);
		rp->rx_ring[i].next_desc = cpu_to_le32(next);
		rp->rx_skbuff[i] = NULL;
	}
	/* Mark the last entry as wrapping the ring. */
	rp->rx_ring[i-1].next_desc = cpu_to_le32(rp->rx_ring_dma);

	/* Fill in the Rx buffers. Handle allocation failure gracefully. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb = dev_alloc_skb(rp->rx_buf_sz);
		rp->rx_skbuff[i] = skb;
		if (skb == NULL)
			break;
		skb->dev = dev;	/* Mark as being used by this device. */

		rp->rx_skbuff_dma[i] =
			pci_map_single(rp->pdev, skb->data, rp->rx_buf_sz,
				       PCI_DMA_FROMDEVICE);

		rp->rx_ring[i].addr = cpu_to_le32(rp->rx_skbuff_dma[i]);
		rp->rx_ring[i].rx_status = cpu_to_le32(DescOwn);
	}
	rp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
}
static void free_rbufs(struct net_device* dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	int i;

	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		rp->rx_ring[i].rx_status = 0;
		rp->rx_ring[i].addr = cpu_to_le32(0xBADF00D0);	/* An invalid address. */
		if (rp->rx_skbuff[i]) {
			pci_unmap_single(rp->pdev,
					 rp->rx_skbuff_dma[i],
					 rp->rx_buf_sz, PCI_DMA_FROMDEVICE);
			dev_kfree_skb(rp->rx_skbuff[i]);
		}
		rp->rx_skbuff[i] = NULL;
	}
}
static void alloc_tbufs(struct net_device* dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	dma_addr_t next;
	int i;

	rp->dirty_tx = rp->cur_tx = 0;
	next = rp->tx_ring_dma;
	for (i = 0; i < TX_RING_SIZE; i++) {
		rp->tx_skbuff[i] = NULL;
		rp->tx_ring[i].tx_status = 0;
		rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
		next += sizeof(struct tx_desc);
		rp->tx_ring[i].next_desc = cpu_to_le32(next);
		if (rp->quirks & rqRhineI)
			rp->tx_buf[i] = &rp->tx_bufs[i * PKT_BUF_SZ];
	}
	rp->tx_ring[i-1].next_desc = cpu_to_le32(rp->tx_ring_dma);
}
static void free_tbufs(struct net_device* dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	int i;

	for (i = 0; i < TX_RING_SIZE; i++) {
		rp->tx_ring[i].tx_status = 0;
		rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
		rp->tx_ring[i].addr = cpu_to_le32(0xBADF00D0);	/* An invalid address. */
		if (rp->tx_skbuff[i]) {
			if (rp->tx_skbuff_dma[i]) {
				pci_unmap_single(rp->pdev,
						 rp->tx_skbuff_dma[i],
						 rp->tx_skbuff[i]->len,
						 PCI_DMA_TODEVICE);
			}
			dev_kfree_skb(rp->tx_skbuff[i]);
		}
		rp->tx_skbuff[i] = NULL;
		rp->tx_buf[i] = NULL;
	}
}
static void rhine_check_media(struct net_device *dev, unsigned int init_media)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	mii_check_media(&rp->mii_if, debug, init_media);

	if (rp->mii_if.full_duplex)
		iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1FDuplex,
			 ioaddr + ChipCmd1);
	else
		iowrite8(ioread8(ioaddr + ChipCmd1) & ~Cmd1FDuplex,
			 ioaddr + ChipCmd1);
	if (debug > 1)
		printk(KERN_INFO "%s: force_media %d, carrier %d\n", dev->name,
		       rp->mii_if.force_media, netif_carrier_ok(dev));
}
/* Called after status of force_media possibly changed */
static void rhine_set_carrier(struct mii_if_info *mii)
{
	if (mii->force_media) {
		/* autoneg is off: Link is always assumed to be up */
		if (!netif_carrier_ok(mii->dev))
			netif_carrier_on(mii->dev);
	}
	else	/* Let MII library update carrier status */
		rhine_check_media(mii->dev, 0);
	if (debug > 1)
		printk(KERN_INFO "%s: force_media %d, carrier %d\n",
		       mii->dev->name, mii->force_media,
		       netif_carrier_ok(mii->dev));
}
static void init_registers(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	int i;

	for (i = 0; i < 6; i++)
		iowrite8(dev->dev_addr[i], ioaddr + StationAddr + i);

	/* Initialize other registers. */
	iowrite16(0x0006, ioaddr + PCIBusConfig);	/* Tune configuration??? */
	/* Configure initial FIFO thresholds. */
	iowrite8(0x20, ioaddr + TxConfig);
	rp->tx_thresh = 0x20;
	rp->rx_thresh = 0x60;		/* Written in rhine_set_rx_mode(). */

	iowrite32(rp->rx_ring_dma, ioaddr + RxRingPtr);
	iowrite32(rp->tx_ring_dma, ioaddr + TxRingPtr);

	rhine_set_rx_mode(dev);

#ifdef CONFIG_VIA_RHINE_NAPI
	napi_enable(&rp->napi);
#endif

	/* Enable interrupts by setting the interrupt mask. */
	iowrite16(IntrRxDone | IntrRxErr | IntrRxEmpty| IntrRxOverflow |
		  IntrRxDropped | IntrRxNoBuf | IntrTxAborted |
		  IntrTxDone | IntrTxError | IntrTxUnderrun |
		  IntrPCIErr | IntrStatsMax | IntrLinkChange,
		  ioaddr + IntrEnable);

	iowrite16(CmdStart | CmdTxOn | CmdRxOn | (Cmd1NoTxPoll << 8),
		  ioaddr + ChipCmd);
	rhine_check_media(dev, 1);
}
/* Enable MII link status auto-polling (required for IntrLinkChange) */
static void rhine_enable_linkmon(void __iomem *ioaddr)
{
	iowrite8(0, ioaddr + MIICmd);
	iowrite8(MII_BMSR, ioaddr + MIIRegAddr);
	iowrite8(0x80, ioaddr + MIICmd);

	RHINE_WAIT_FOR((ioread8(ioaddr + MIIRegAddr) & 0x20));

	iowrite8(MII_BMSR | 0x40, ioaddr + MIIRegAddr);
}
/* Disable MII link status auto-polling (required for MDIO access) */
static void rhine_disable_linkmon(void __iomem *ioaddr, u32 quirks)
{
	iowrite8(0, ioaddr + MIICmd);

	if (quirks & rqRhineI) {
		iowrite8(0x01, ioaddr + MIIRegAddr);	// MII_BMSR

		/* Can be called from ISR. Evil. */
		mdelay(1);

		/* 0x80 must be set immediately before turning it off */
		iowrite8(0x80, ioaddr + MIICmd);

		RHINE_WAIT_FOR(ioread8(ioaddr + MIIRegAddr) & 0x20);

		/* Heh. Now clear 0x80 again. */
		iowrite8(0, ioaddr + MIICmd);
	}
	else
		RHINE_WAIT_FOR(ioread8(ioaddr + MIIRegAddr) & 0x80);
}
/* Read and write over the MII Management Data I/O (MDIO) interface. */

static int mdio_read(struct net_device *dev, int phy_id, int regnum)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	int result;

	rhine_disable_linkmon(ioaddr, rp->quirks);

	/* rhine_disable_linkmon already cleared MIICmd */
	iowrite8(phy_id, ioaddr + MIIPhyAddr);
	iowrite8(regnum, ioaddr + MIIRegAddr);
	iowrite8(0x40, ioaddr + MIICmd);	/* Trigger read */
	RHINE_WAIT_FOR(!(ioread8(ioaddr + MIICmd) & 0x40));
	result = ioread16(ioaddr + MIIData);

	rhine_enable_linkmon(ioaddr);
	return result;
}
static void mdio_write(struct net_device *dev, int phy_id, int regnum, int value)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	rhine_disable_linkmon(ioaddr, rp->quirks);

	/* rhine_disable_linkmon already cleared MIICmd */
	iowrite8(phy_id, ioaddr + MIIPhyAddr);
	iowrite8(regnum, ioaddr + MIIRegAddr);
	iowrite16(value, ioaddr + MIIData);
	iowrite8(0x20, ioaddr + MIICmd);	/* Trigger write */
	RHINE_WAIT_FOR(!(ioread8(ioaddr + MIICmd) & 0x20));

	rhine_enable_linkmon(ioaddr);
}
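
/*
 * Usage sketch for the MDIO accessors above (hypothetical helper, not used
 * by the driver): BMSR bit BMSR_LSTATUS reports the PHY's view of the link.
 */
static inline int example_phy_link_up(struct net_device *dev, int phy_id)
{
	return mdio_read(dev, phy_id, MII_BMSR) & BMSR_LSTATUS;
}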
static int rhine_open(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	int rc;

	rc = request_irq(rp->pdev->irq, &rhine_interrupt, IRQF_SHARED, dev->name,
			 dev);
	if (rc)
		return rc;

	if (debug > 1)
		printk(KERN_DEBUG "%s: rhine_open() irq %d.\n",
		       dev->name, rp->pdev->irq);

	rc = alloc_ring(dev);
	if (rc) {
		free_irq(rp->pdev->irq, dev);
		return rc;
	}
	alloc_rbufs(dev);
	alloc_tbufs(dev);
	rhine_chip_reset(dev);
	init_registers(dev);
	if (debug > 2)
		printk(KERN_DEBUG "%s: Done rhine_open(), status %4.4x "
		       "MII status: %4.4x.\n",
		       dev->name, ioread16(ioaddr + ChipCmd),
		       mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));

	netif_start_queue(dev);

	return 0;
}
static void rhine_tx_timeout(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	printk(KERN_WARNING "%s: Transmit timed out, status %4.4x, PHY status "
	       "%4.4x, resetting...\n",
	       dev->name, ioread16(ioaddr + IntrStatus),
	       mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));

	/* protect against concurrent rx interrupts */
	disable_irq(rp->pdev->irq);

#ifdef CONFIG_VIA_RHINE_NAPI
	napi_disable(&rp->napi);
#endif

	spin_lock(&rp->lock);

	/* clear all descriptors */
	free_tbufs(dev);
	free_rbufs(dev);
	alloc_tbufs(dev);
	alloc_rbufs(dev);

	/* Reinitialize the hardware. */
	rhine_chip_reset(dev);
	init_registers(dev);

	spin_unlock(&rp->lock);
	enable_irq(rp->pdev->irq);

	dev->trans_start = jiffies;
	rp->stats.tx_errors++;
	netif_wake_queue(dev);
}
static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	unsigned entry;

	/* Caution: the write order is important here, set the field
	   with the "ownership" bits last. */

	/* Calculate the next Tx descriptor entry. */
	entry = rp->cur_tx % TX_RING_SIZE;

	if (skb_padto(skb, ETH_ZLEN))
		return 0;

	rp->tx_skbuff[entry] = skb;

	if ((rp->quirks & rqRhineI) &&
	    (((unsigned long)skb->data & 3) || skb_shinfo(skb)->nr_frags != 0 ||
	     skb->ip_summed == CHECKSUM_PARTIAL)) {
		/* Must use alignment buffer. */
		if (skb->len > PKT_BUF_SZ) {
			/* packet too long, drop it */
			dev_kfree_skb(skb);
			rp->tx_skbuff[entry] = NULL;
			rp->stats.tx_dropped++;
			return 0;
		}

		/* Padding is not copied and so must be redone. */
		skb_copy_and_csum_dev(skb, rp->tx_buf[entry]);
		if (skb->len < ETH_ZLEN)
			memset(rp->tx_buf[entry] + skb->len, 0,
			       ETH_ZLEN - skb->len);
		rp->tx_skbuff_dma[entry] = 0;
		rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_bufs_dma +
						      (rp->tx_buf[entry] -
						       rp->tx_bufs));
	} else {
		rp->tx_skbuff_dma[entry] =
			pci_map_single(rp->pdev, skb->data, skb->len,
				       PCI_DMA_TODEVICE);
		rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_skbuff_dma[entry]);
	}

	rp->tx_ring[entry].desc_length =
		cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));

	/* lock eth irq */
	spin_lock_irq(&rp->lock);
	wmb();
	rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
	wmb();

	rp->cur_tx++;

	/* Non-x86 Todo: explicitly flush cache lines here. */

	/* Wake the potentially-idle transmit channel */
	iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
		 ioaddr + ChipCmd1);
	IOSYNC;

	if (rp->cur_tx == rp->dirty_tx + TX_QUEUE_LEN)
		netif_stop_queue(dev);

	dev->trans_start = jiffies;

	spin_unlock_irq(&rp->lock);

	if (debug > 4) {
		printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n",
		       dev->name, rp->cur_tx-1, entry);
	}
	return 0;
}
/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
static irqreturn_t rhine_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	u32 intr_status;
	int boguscnt = max_interrupt_work;
	int handled = 0;

	while ((intr_status = get_intr_status(dev))) {
		handled = 1;

		/* Acknowledge all of the current interrupt sources ASAP. */
		if (intr_status & IntrTxDescRace)
			iowrite8(0x08, ioaddr + IntrStatus2);
		iowrite16(intr_status & 0xffff, ioaddr + IntrStatus);
		IOSYNC;

		if (debug > 4)
			printk(KERN_DEBUG "%s: Interrupt, status %8.8x.\n",
			       dev->name, intr_status);

		if (intr_status & (IntrRxDone | IntrRxErr | IntrRxDropped |
				   IntrRxWakeUp | IntrRxEmpty | IntrRxNoBuf)) {
#ifdef CONFIG_VIA_RHINE_NAPI
			iowrite16(IntrTxAborted |
				  IntrTxDone | IntrTxError | IntrTxUnderrun |
				  IntrPCIErr | IntrStatsMax | IntrLinkChange,
				  ioaddr + IntrEnable);

			netif_rx_schedule(dev, &rp->napi);
#else
			rhine_rx(dev, RX_RING_SIZE);
#endif
		}

		if (intr_status & (IntrTxErrSummary | IntrTxDone)) {
			if (intr_status & IntrTxErrSummary) {
				/* Avoid scavenging before Tx engine turned off */
				RHINE_WAIT_FOR(!(ioread8(ioaddr+ChipCmd) & CmdTxOn));
				if (debug > 2 &&
				    ioread8(ioaddr+ChipCmd) & CmdTxOn)
					printk(KERN_WARNING "%s: "
					       "rhine_interrupt() Tx engine "
					       "still on.\n", dev->name);
			}
			rhine_tx(dev);
		}

		/* Abnormal error summary/uncommon events handlers. */
		if (intr_status & (IntrPCIErr | IntrLinkChange |
				   IntrStatsMax | IntrTxError | IntrTxAborted |
				   IntrTxUnderrun | IntrTxDescRace))
			rhine_error(dev, intr_status);

		if (--boguscnt < 0) {
			printk(KERN_WARNING "%s: Too much work at interrupt, "
			       "status=%#8.8x.\n",
			       dev->name, intr_status);
			break;
		}
	}

	if (debug > 3)
		printk(KERN_DEBUG "%s: exiting interrupt, status=%8.8x.\n",
		       dev->name, ioread16(ioaddr + IntrStatus));
	return IRQ_RETVAL(handled);
}
/* This routine is logically part of the interrupt handler, but isolated
   for clarity. */
static void rhine_tx(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	int txstatus = 0, entry = rp->dirty_tx % TX_RING_SIZE;

	spin_lock(&rp->lock);

	/* find and cleanup dirty tx descriptors */
	while (rp->dirty_tx != rp->cur_tx) {
		txstatus = le32_to_cpu(rp->tx_ring[entry].tx_status);
		if (debug > 6)
			printk(KERN_DEBUG "Tx scavenge %d status %8.8x.\n",
			       entry, txstatus);
		if (txstatus & DescOwn)
			break;
		if (txstatus & 0x8000) {
			if (debug > 1)
				printk(KERN_DEBUG "%s: Transmit error, "
				       "Tx status %8.8x.\n",
				       dev->name, txstatus);
			rp->stats.tx_errors++;
			if (txstatus & 0x0400) rp->stats.tx_carrier_errors++;
			if (txstatus & 0x0200) rp->stats.tx_window_errors++;
			if (txstatus & 0x0100) rp->stats.tx_aborted_errors++;
			if (txstatus & 0x0080) rp->stats.tx_heartbeat_errors++;
			if (((rp->quirks & rqRhineI) && txstatus & 0x0002) ||
			    (txstatus & 0x0800) || (txstatus & 0x1000)) {
				rp->stats.tx_fifo_errors++;
				rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
				break; /* Keep the skb - we try again */
			}
			/* Transmitter restarted in 'abnormal' handler. */
		} else {
			if (rp->quirks & rqRhineI)
				rp->stats.collisions += (txstatus >> 3) & 0x0F;
			else
				rp->stats.collisions += txstatus & 0x0F;
			if (debug > 6)
				printk(KERN_DEBUG "collisions: %1.1x:%1.1x\n",
				       (txstatus >> 3) & 0xF,
				       txstatus & 0xF);
			rp->stats.tx_bytes += rp->tx_skbuff[entry]->len;
			rp->stats.tx_packets++;
		}
		/* Free the original skb. */
		if (rp->tx_skbuff_dma[entry]) {
			pci_unmap_single(rp->pdev,
					 rp->tx_skbuff_dma[entry],
					 rp->tx_skbuff[entry]->len,
					 PCI_DMA_TODEVICE);
		}
		dev_kfree_skb_irq(rp->tx_skbuff[entry]);
		rp->tx_skbuff[entry] = NULL;
		entry = (++rp->dirty_tx) % TX_RING_SIZE;
	}
	if ((rp->cur_tx - rp->dirty_tx) < TX_QUEUE_LEN - 4)
		netif_wake_queue(dev);

	spin_unlock(&rp->lock);
}
/* Process up to limit frames from receive ring */
static int rhine_rx(struct net_device *dev, int limit)
{
	struct rhine_private *rp = netdev_priv(dev);
	int count;
	int entry = rp->cur_rx % RX_RING_SIZE;

	if (debug > 4)
		printk(KERN_DEBUG "%s: rhine_rx(), entry %d status %8.8x.\n",
		       dev->name, entry,
		       le32_to_cpu(rp->rx_head_desc->rx_status));

	/* If EOP is set on the next entry, it's a new packet. Send it up. */
	for (count = 0; count < limit; ++count) {
		struct rx_desc *desc = rp->rx_head_desc;
		u32 desc_status = le32_to_cpu(desc->rx_status);
		int data_size = desc_status >> 16;

		if (desc_status & DescOwn)
			break;

		if (debug > 4)
			printk(KERN_DEBUG "rhine_rx() status is %8.8x.\n",
			       desc_status);

		if ((desc_status & (RxWholePkt | RxErr)) != RxWholePkt) {
			if ((desc_status & RxWholePkt) != RxWholePkt) {
				printk(KERN_WARNING "%s: Oversized Ethernet "
				       "frame spanned multiple buffers, entry "
				       "%#x length %d status %8.8x!\n",
				       dev->name, entry, data_size,
				       desc_status);
				printk(KERN_WARNING "%s: Oversized Ethernet "
				       "frame %p vs %p.\n", dev->name,
				       rp->rx_head_desc, &rp->rx_ring[entry]);
				rp->stats.rx_length_errors++;
			} else if (desc_status & RxErr) {
				/* There was an error. */
				if (debug > 2)
					printk(KERN_DEBUG "rhine_rx() Rx "
					       "error was %8.8x.\n",
					       desc_status);
				rp->stats.rx_errors++;
				if (desc_status & 0x0030) rp->stats.rx_length_errors++;
				if (desc_status & 0x0048) rp->stats.rx_fifo_errors++;
				if (desc_status & 0x0004) rp->stats.rx_frame_errors++;
				if (desc_status & 0x0002) {
					/* this can also be updated outside the interrupt handler */
					spin_lock(&rp->lock);
					rp->stats.rx_crc_errors++;
					spin_unlock(&rp->lock);
				}
			}
		} else {
			struct sk_buff *skb;
			/* Length should omit the CRC */
			int pkt_len = data_size - 4;

			/* Check if the packet is long enough to accept without
			   copying to a minimally-sized skbuff. */
			if (pkt_len < rx_copybreak &&
			    (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
				pci_dma_sync_single_for_cpu(rp->pdev,
							    rp->rx_skbuff_dma[entry],
							    rp->rx_buf_sz,
							    PCI_DMA_FROMDEVICE);

				skb_copy_to_linear_data(skb,
							rp->rx_skbuff[entry]->data,
							pkt_len);
				skb_put(skb, pkt_len);
				pci_dma_sync_single_for_device(rp->pdev,
							       rp->rx_skbuff_dma[entry],
							       rp->rx_buf_sz,
							       PCI_DMA_FROMDEVICE);
			} else {
				skb = rp->rx_skbuff[entry];
				if (skb == NULL) {
					printk(KERN_ERR "%s: Inconsistent Rx "
					       "descriptor chain.\n",
					       dev->name);
					break;
				}
				rp->rx_skbuff[entry] = NULL;
				skb_put(skb, pkt_len);
				pci_unmap_single(rp->pdev,
						 rp->rx_skbuff_dma[entry],
						 rp->rx_buf_sz,
						 PCI_DMA_FROMDEVICE);
			}
			skb->protocol = eth_type_trans(skb, dev);
#ifdef CONFIG_VIA_RHINE_NAPI
			netif_receive_skb(skb);
#else
			netif_rx(skb);
#endif
			dev->last_rx = jiffies;
			rp->stats.rx_bytes += pkt_len;
			rp->stats.rx_packets++;
		}
		entry = (++rp->cur_rx) % RX_RING_SIZE;
		rp->rx_head_desc = &rp->rx_ring[entry];
	}
	/* Refill the Rx ring buffers. */
	for (; rp->cur_rx - rp->dirty_rx > 0; rp->dirty_rx++) {
		struct sk_buff *skb;
		entry = rp->dirty_rx % RX_RING_SIZE;
		if (rp->rx_skbuff[entry] == NULL) {
			skb = dev_alloc_skb(rp->rx_buf_sz);
			rp->rx_skbuff[entry] = skb;
			if (skb == NULL)
				break;	/* Better luck next round. */
			skb->dev = dev;	/* Mark as being used by this device. */
			rp->rx_skbuff_dma[entry] =
				pci_map_single(rp->pdev, skb->data,
					       rp->rx_buf_sz,
					       PCI_DMA_FROMDEVICE);
			rp->rx_ring[entry].addr = cpu_to_le32(rp->rx_skbuff_dma[entry]);
		}
		rp->rx_ring[entry].rx_status = cpu_to_le32(DescOwn);
	}

	return count;
}
/*
 * Clears the "tally counters" for CRC errors and missed frames(?).
 * It has been reported that some chips need a write of 0 to clear
 * these, for others the counters are set to 1 when written to and
 * instead cleared when read. So we clear them both ways ...
 */
static inline void clear_tally_counters(void __iomem *ioaddr)
{
	iowrite32(0, ioaddr + RxMissed);
	ioread16(ioaddr + RxCRCErrs);
	ioread16(ioaddr + RxMissed);
}
static void rhine_restart_tx(struct net_device *dev) {
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	int entry = rp->dirty_tx % TX_RING_SIZE;
	u32 intr_status;

	/*
	 * If new errors occurred, we need to sort them out before doing Tx.
	 * In that case the ISR will be back here RSN anyway.
	 */
	intr_status = get_intr_status(dev);

	if ((intr_status & IntrTxErrSummary) == 0) {

		/* We know better than the chip where it should continue. */
		iowrite32(rp->tx_ring_dma + entry * sizeof(struct tx_desc),
			  ioaddr + TxRingPtr);

		iowrite8(ioread8(ioaddr + ChipCmd) | CmdTxOn,
			 ioaddr + ChipCmd);
		iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
			 ioaddr + ChipCmd1);
		IOSYNC;
	}
	else {
		/* This should never happen */
		if (debug > 1)
			printk(KERN_WARNING "%s: rhine_restart_tx() "
			       "Another error occurred %8.8x.\n",
			       dev->name, intr_status);
	}
}
static void rhine_error(struct net_device *dev, int intr_status)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	spin_lock(&rp->lock);

	if (intr_status & IntrLinkChange)
		rhine_check_media(dev, 0);
	if (intr_status & IntrStatsMax) {
		rp->stats.rx_crc_errors += ioread16(ioaddr + RxCRCErrs);
		rp->stats.rx_missed_errors += ioread16(ioaddr + RxMissed);
		clear_tally_counters(ioaddr);
	}
	if (intr_status & IntrTxAborted) {
		if (debug > 1)
			printk(KERN_INFO "%s: Abort %8.8x, frame dropped.\n",
			       dev->name, intr_status);
	}
	if (intr_status & IntrTxUnderrun) {
		if (rp->tx_thresh < 0xE0)
			iowrite8(rp->tx_thresh += 0x20, ioaddr + TxConfig);
		if (debug > 1)
			printk(KERN_INFO "%s: Transmitter underrun, Tx "
			       "threshold now %2.2x.\n",
			       dev->name, rp->tx_thresh);
	}
	if (intr_status & IntrTxDescRace) {
		if (debug > 2)
			printk(KERN_INFO "%s: Tx descriptor write-back race.\n",
			       dev->name);
	}
	if ((intr_status & IntrTxError) &&
	    (intr_status & (IntrTxAborted |
			    IntrTxUnderrun | IntrTxDescRace)) == 0) {
		if (rp->tx_thresh < 0xE0) {
			iowrite8(rp->tx_thresh += 0x20, ioaddr + TxConfig);
		}
		if (debug > 1)
			printk(KERN_INFO "%s: Unspecified error. Tx "
			       "threshold now %2.2x.\n",
			       dev->name, rp->tx_thresh);
	}
	if (intr_status & (IntrTxAborted | IntrTxUnderrun | IntrTxDescRace |
			   IntrTxError))
		rhine_restart_tx(dev);

	if (intr_status & ~(IntrLinkChange | IntrStatsMax | IntrTxUnderrun |
			    IntrTxError | IntrTxAborted | IntrNormalSummary |
			    IntrTxDescRace)) {
		if (debug > 1)
			printk(KERN_ERR "%s: Something Wicked happened! "
			       "%8.8x.\n", dev->name, intr_status);
	}

	spin_unlock(&rp->lock);
}
static struct net_device_stats *rhine_get_stats(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	unsigned long flags;

	spin_lock_irqsave(&rp->lock, flags);
	rp->stats.rx_crc_errors += ioread16(ioaddr + RxCRCErrs);
	rp->stats.rx_missed_errors += ioread16(ioaddr + RxMissed);
	clear_tally_counters(ioaddr);
	spin_unlock_irqrestore(&rp->lock, flags);

	return &rp->stats;
}
static void rhine_set_rx_mode(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	u32 mc_filter[2];	/* Multicast hash filter */
	u8 rx_mode;		/* Note: 0x02=accept runt, 0x01=accept errs */

	if (dev->flags & IFF_PROMISC) {		/* Set promiscuous. */
		rx_mode = 0x1C;
		iowrite32(0xffffffff, ioaddr + MulticastFilter0);
		iowrite32(0xffffffff, ioaddr + MulticastFilter1);
	} else if ((dev->mc_count > multicast_filter_limit)
		   || (dev->flags & IFF_ALLMULTI)) {
		/* Too many to match, or accept all multicasts. */
		iowrite32(0xffffffff, ioaddr + MulticastFilter0);
		iowrite32(0xffffffff, ioaddr + MulticastFilter1);
		rx_mode = 0x0C;
	} else {
		struct dev_mc_list *mclist;
		int i;
		memset(mc_filter, 0, sizeof(mc_filter));
		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {
			int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;

			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
		}
		iowrite32(mc_filter[0], ioaddr + MulticastFilter0);
		iowrite32(mc_filter[1], ioaddr + MulticastFilter1);
		rx_mode = 0x0C;
	}
	iowrite8(rp->rx_thresh | rx_mode, ioaddr + RxConfig);
}
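
/*
 * Sketch of the hash step above (hypothetical helper, not used by the
 * driver): the top 6 bits of the Ethernet CRC select one of the 64 filter
 * bits, split across the two 32-bit MulticastFilter registers.
 */
static inline int example_mc_hash_bit(const unsigned char *addr)
{
	return ether_crc(ETH_ALEN, addr) >> 26;	/* bit index 0..63 */
}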
static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct rhine_private *rp = netdev_priv(dev);

	strcpy(info->driver, DRV_NAME);
	strcpy(info->version, DRV_VERSION);
	strcpy(info->bus_info, pci_name(rp->pdev));
}
static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct rhine_private *rp = netdev_priv(dev);
	int rc;

	spin_lock_irq(&rp->lock);
	rc = mii_ethtool_gset(&rp->mii_if, cmd);
	spin_unlock_irq(&rp->lock);

	return rc;
}

static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct rhine_private *rp = netdev_priv(dev);
	int rc;

	spin_lock_irq(&rp->lock);
	rc = mii_ethtool_sset(&rp->mii_if, cmd);
	spin_unlock_irq(&rp->lock);
	rhine_set_carrier(&rp->mii_if);

	return rc;
}
static int netdev_nway_reset(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);

	return mii_nway_restart(&rp->mii_if);
}

static u32 netdev_get_link(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);

	return mii_link_ok(&rp->mii_if);
}

static u32 netdev_get_msglevel(struct net_device *dev)
{
	return debug;
}

static void netdev_set_msglevel(struct net_device *dev, u32 value)
{
	debug = value;
}
static void rhine_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct rhine_private *rp = netdev_priv(dev);

	if (!(rp->quirks & rqWOL))
		return;

	spin_lock_irq(&rp->lock);
	wol->supported = WAKE_PHY | WAKE_MAGIC |
			 WAKE_UCAST | WAKE_MCAST | WAKE_BCAST;	/* Untested */
	wol->wolopts = rp->wolopts;
	spin_unlock_irq(&rp->lock);
}

static int rhine_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct rhine_private *rp = netdev_priv(dev);
	u32 support = WAKE_PHY | WAKE_MAGIC |
		      WAKE_UCAST | WAKE_MCAST | WAKE_BCAST;	/* Untested */

	if (!(rp->quirks & rqWOL))
		return -EINVAL;

	if (wol->wolopts & ~support)
		return -EINVAL;

	spin_lock_irq(&rp->lock);
	rp->wolopts = wol->wolopts;
	spin_unlock_irq(&rp->lock);

	return 0;
}
static const struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo		= netdev_get_drvinfo,
	.get_settings		= netdev_get_settings,
	.set_settings		= netdev_set_settings,
	.nway_reset		= netdev_nway_reset,
	.get_link		= netdev_get_link,
	.get_msglevel		= netdev_get_msglevel,
	.set_msglevel		= netdev_set_msglevel,
	.get_wol		= rhine_get_wol,
	.set_wol		= rhine_set_wol,
	.get_sg			= ethtool_op_get_sg,
	.get_tx_csum		= ethtool_op_get_tx_csum,
};
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct rhine_private *rp = netdev_priv(dev);
	int rc;

	if (!netif_running(dev))
		return -EINVAL;

	spin_lock_irq(&rp->lock);
	rc = generic_mii_ioctl(&rp->mii_if, if_mii(rq), cmd, NULL);
	spin_unlock_irq(&rp->lock);
	rhine_set_carrier(&rp->mii_if);

	return rc;
}
static int rhine_close(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	spin_lock_irq(&rp->lock);

	netif_stop_queue(dev);
#ifdef CONFIG_VIA_RHINE_NAPI
	napi_disable(&rp->napi);
#endif

	if (debug > 1)
		printk(KERN_DEBUG "%s: Shutting down ethercard, "
		       "status was %4.4x.\n",
		       dev->name, ioread16(ioaddr + ChipCmd));

	/* Switch to loopback mode to avoid hardware races. */
	iowrite8(rp->tx_thresh | 0x02, ioaddr + TxConfig);

	/* Disable interrupts by clearing the interrupt mask. */
	iowrite16(0x0000, ioaddr + IntrEnable);

	/* Stop the chip's Tx and Rx processes. */
	iowrite16(CmdStop, ioaddr + ChipCmd);

	spin_unlock_irq(&rp->lock);

	free_irq(rp->pdev->irq, dev);
	free_rbufs(dev);
	free_tbufs(dev);
	free_ring(dev);

	return 0;
}
static void __devexit rhine_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rhine_private *rp = netdev_priv(dev);

	unregister_netdev(dev);

	pci_iounmap(pdev, rp->base);
	pci_release_regions(pdev);

	free_netdev(dev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
static void rhine_shutdown (struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	if (!(rp->quirks & rqWOL))
		return; /* Nothing to do for non-WOL adapters */

	rhine_power_init(dev);

	/* Make sure we use pattern 0, 1 and not 4, 5 */
	if (rp->quirks & rq6patterns)
		iowrite8(0x04, ioaddr + 0xA7);

	if (rp->wolopts & WAKE_MAGIC) {
		iowrite8(WOLmagic, ioaddr + WOLcrSet);
		/*
		 * Turn EEPROM-controlled wake-up back on -- some hardware may
		 * not cooperate otherwise.
		 */
		iowrite8(ioread8(ioaddr + ConfigA) | 0x03, ioaddr + ConfigA);
	}

	if (rp->wolopts & (WAKE_BCAST|WAKE_MCAST))
		iowrite8(WOLbmcast, ioaddr + WOLcgSet);

	if (rp->wolopts & WAKE_PHY)
		iowrite8(WOLlnkon | WOLlnkoff, ioaddr + WOLcrSet);

	if (rp->wolopts & WAKE_UCAST)
		iowrite8(WOLucast, ioaddr + WOLcrSet);

	if (rp->wolopts) {
		/* Enable legacy WOL (for old motherboards) */
		iowrite8(0x01, ioaddr + PwcfgSet);
		iowrite8(ioread8(ioaddr + StickyHW) | 0x04, ioaddr + StickyHW);
	}

	/* Hit power state D3 (sleep) */
	if (!avoid_D3)
		iowrite8(ioread8(ioaddr + StickyHW) | 0x03, ioaddr + StickyHW);

	/* TODO: Check use of pci_enable_wake() */
}
#ifdef CONFIG_PM
static int rhine_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rhine_private *rp = netdev_priv(dev);
	unsigned long flags;

	if (!netif_running(dev))
		return 0;

#ifdef CONFIG_VIA_RHINE_NAPI
	napi_disable(&rp->napi);
#endif
	netif_device_detach(dev);
	pci_save_state(pdev);

	spin_lock_irqsave(&rp->lock, flags);
	rhine_shutdown(pdev);
	spin_unlock_irqrestore(&rp->lock, flags);

	free_irq(dev->irq, dev);
	return 0;
}
static int rhine_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rhine_private *rp = netdev_priv(dev);
	unsigned long flags;
	int ret;

	if (!netif_running(dev))
		return 0;

	if (request_irq(dev->irq, rhine_interrupt, IRQF_SHARED, dev->name, dev))
		printk(KERN_ERR "via-rhine %s: request_irq failed\n", dev->name);

	ret = pci_set_power_state(pdev, PCI_D0);
	if (debug > 1)
		printk(KERN_INFO "%s: Entering power state D0 %s (%d).\n",
		       dev->name, ret ? "failed" : "succeeded", ret);

	pci_restore_state(pdev);

	spin_lock_irqsave(&rp->lock, flags);
#ifdef USE_MMIO
	enable_mmio(rp->pioaddr, rp->quirks);
#endif
	rhine_power_init(dev);
	free_tbufs(dev);
	free_rbufs(dev);
	alloc_tbufs(dev);
	alloc_rbufs(dev);
	init_registers(dev);
	spin_unlock_irqrestore(&rp->lock, flags);

	netif_device_attach(dev);

	return 0;
}
#endif /* CONFIG_PM */
static struct pci_driver rhine_driver = {
	.name		= DRV_NAME,
	.id_table	= rhine_pci_tbl,
	.probe		= rhine_init_one,
	.remove		= __devexit_p(rhine_remove_one),
#ifdef CONFIG_PM
	.suspend	= rhine_suspend,
	.resume		= rhine_resume,
#endif /* CONFIG_PM */
	.shutdown	= rhine_shutdown,
};
static struct dmi_system_id __initdata rhine_dmi_table[] = {
	{
		.ident = "EPIA-M",
		.matches = {
			DMI_MATCH(DMI_BIOS_VENDOR, "Award Software International, Inc."),
			DMI_MATCH(DMI_BIOS_VERSION, "6.00 PG"),
		},
	},
	{
		.ident = "KV7",
		.matches = {
			DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies, LTD"),
			DMI_MATCH(DMI_BIOS_VERSION, "6.00 PG"),
		},
	},
	{ NULL }
};
static int __init rhine_init(void)
{
	/* when a module, this is printed whether or not devices are found in probe */
#ifdef MODULE
	printk(version);
#endif
	if (dmi_check_system(rhine_dmi_table)) {
		/* these BIOSes fail at PXE boot if chip is in D3 */
		avoid_D3 = 1;
		printk(KERN_WARNING "%s: Broken BIOS detected, avoid_D3 "
		       "enabled.\n", DRV_NAME);
	}
	else if (avoid_D3)
		printk(KERN_INFO "%s: avoid_D3 set.\n", DRV_NAME);

	return pci_register_driver(&rhine_driver);
}
static void __exit rhine_cleanup(void)
{
	pci_unregister_driver(&rhine_driver);
}


module_init(rhine_init);
module_exit(rhine_cleanup);