/* cassini.c: Sun Microsystems Cassini(+) ethernet driver.
 *
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2003 Adrian Sun (asun@darksunrising.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
 * 02111-1307, USA.
 *
 * This driver uses the sungem driver (c) David Miller
 * (davem@redhat.com) as its basis.
 *
 * The cassini chip has a number of features that distinguish it from
 * the GEM chip:
 *  4 transmit descriptor rings that are used for either QoS (VLAN) or
 *      load balancing (non-VLAN mode)
 *  batching of multiple packets
 *  multiple CPU dispatching
 *  page-based RX descriptor engine with separate completion rings
 *  Gigabit support (GMII and PCS interface)
 *  MIF link up/down detection works
 *
 * RX is handled by page sized buffers that are attached as fragments to
 * the skb. here's what's done:
 *  -- driver allocates pages at a time and keeps reference counts
 *     on them.
 *  -- the upper protocol layers assume that the header is in the skb
 *     itself. as a result, cassini will copy a small amount (64 bytes)
 *     to make them happy.
 *  -- driver appends the rest of the data pages as frags to skbuffs
 *     and increments the reference count
 *  -- on page reclamation, the driver swaps the page with a spare page.
 *     if that page is still in use, it frees its reference to that page,
 *     and allocates a new page for use. otherwise, it just recycles the
 *     page.
 *
 * NOTE: cassini can parse the header. however, it's not worth it
 *       as long as the network stack requires a header copy.
 *
 * TX has 4 queues. currently these queues are used in a round-robin
 * fashion for load balancing. They can also be used for QoS. for that
 * to work, however, QoS information needs to be exposed down to the driver
 * level so that subqueues get targeted to particular transmit rings.
 * alternatively, the queues can be configured via use of the all-purpose
 * ioctl.
 *
 * RX DATA: the rx completion ring has all the info, but the rx desc
 * ring has all of the data. RX can conceivably come in under multiple
 * interrupts, but the INT# assignment needs to be set up properly by
 * the BIOS and conveyed to the driver. PCI BIOSes don't know how to do
 * that. also, the two descriptor rings are designed to distinguish between
 * encrypted and non-encrypted packets, but we use them for buffering
 * instead.
 *
 * by default, the selective clear mask is set up to process rx packets.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/highmem.h>
#include <linux/list.h>
#include <linux/dma-mapping.h>

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <linux/random.h>
#include <linux/mii.h>
#include <linux/tcp.h>
#include <linux/mutex.h>
#include <linux/firmware.h>

#include <net/checksum.h>

#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>

#define cas_page_map(x)      kmap_atomic((x), KM_SKB_DATA_SOFTIRQ)
#define cas_page_unmap(x)    kunmap_atomic((x), KM_SKB_DATA_SOFTIRQ)
#define CAS_NCPUS            num_online_cpus()

#if defined(CONFIG_CASSINI_NAPI) && defined(HAVE_NETDEV_POLL)
#define USE_NAPI
#define cas_skb_release(x)  netif_receive_skb(x)
#else
#define cas_skb_release(x)  netif_rx(x)
#endif

/* select which firmware to use */
#define USE_HP_WORKAROUND
#define HP_WORKAROUND_DEFAULT /* select which firmware to use as default */
#define CAS_HP_ALT_FIRMWARE   cas_prog_null /* alternate firmware */

#define USE_TX_COMPWB      /* use completion writeback registers */
#define USE_CSMA_CD_PROTO  /* standard CSMA/CD */
#define USE_RX_BLANK       /* hw interrupt mitigation */
#undef  USE_ENTROPY_DEV    /* don't test for entropy device */

/* NOTE: these aren't usable unless PCI interrupts can be assigned.
 * also, we need to make cp->lock finer-grained.
 */

#undef  USE_VPD_DEBUG       /* debug vpd information if defined */

/* rx processing options */
#define USE_PAGE_ORDER      /* specify to allocate large rx pages */
#define RX_DONT_BATCH  0    /* if 1, don't batch flows */
#define RX_COPY_ALWAYS 0    /* if 0, use frags */
#define RX_COPY_MIN    64   /* copy a little to make upper layers happy */
#undef  RX_COUNT_BUFFERS    /* define to calculate RX buffer stats */

#define DRV_MODULE_NAME     "cassini"
#define PFX DRV_MODULE_NAME ": "
#define DRV_MODULE_VERSION  "1.6"
#define DRV_MODULE_RELDATE  "21 May 2008"

#define CAS_DEF_MSG_ENABLE        \
        (NETIF_MSG_DRV          | \
         NETIF_MSG_PROBE        | \
         NETIF_MSG_LINK         | \
         NETIF_MSG_TIMER        | \
         NETIF_MSG_IFDOWN       | \
         NETIF_MSG_IFUP         | \
         NETIF_MSG_RX_ERR       | \
         NETIF_MSG_TX_ERR)

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define CAS_TX_TIMEOUT            (HZ)
#define CAS_LINK_TIMEOUT          (22*HZ/10)
#define CAS_LINK_FAST_TIMEOUT     (1)

/* timeout values for state changing. these specify the number
 * of 10us delays to be used before giving up.
 */
#define STOP_TRIES_PHY 1000
#define STOP_TRIES     5000

/* specify a minimum frame size to deal with some fifo issues
 * max mtu == 2 * page size - ethernet header - 64 - swivel =
 *            2 * page_size - 0x50
 */
#define CAS_MIN_FRAME             97
#define CAS_1000MB_MIN_FRAME      255
#define CAS_MIN_MTU               60
#define CAS_MAX_MTU               min(((cp->page_size << 1) - 0x50), 9000)

/*
 * Eliminate these and use separate atomic counters for each, to
 * avoid a race condition.
 */
#define CAS_RESET_MTU             1
#define CAS_RESET_ALL             2
#define CAS_RESET_SPARE           3

static char version[] __devinitdata =
        DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

static int cassini_debug = -1;  /* -1 == use CAS_DEF_MSG_ENABLE as value */
static int link_mode;

MODULE_AUTHOR("Adrian Sun (asun@darksunrising.com)");
MODULE_DESCRIPTION("Sun Cassini(+) ethernet driver");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE("sun/cassini.bin");
module_param(cassini_debug, int, 0);
MODULE_PARM_DESC(cassini_debug, "Cassini bitmapped debugging message enable value");
module_param(link_mode, int, 0);
MODULE_PARM_DESC(link_mode, "default link mode");

/*
 * Work around for a PCS bug in which the link goes down due to the chip
 * being confused and never showing a link status of "up."
 */
#define DEFAULT_LINKDOWN_TIMEOUT 5

/*
 * Value in seconds, for user input.
 */
static int linkdown_timeout = DEFAULT_LINKDOWN_TIMEOUT;
module_param(linkdown_timeout, int, 0);
MODULE_PARM_DESC(linkdown_timeout,
        "min reset interval in sec. for PCS linkdown issue; disabled if not positive");

/*
 * value in 'ticks' (units used by jiffies). Set when we init the
 * module because 'HZ' is actually a function call on some flavors of
 * Linux.  This will default to DEFAULT_LINKDOWN_TIMEOUT * HZ.
 */
static int link_transition_timeout;

static u16 link_modes[] __devinitdata = {
        BMCR_ANENABLE,                   /* 0 : autoneg */
        0,                               /* 1 : 10bt half duplex */
        BMCR_SPEED100,                   /* 2 : 100bt half duplex */
        BMCR_FULLDPLX,                   /* 3 : 10bt full duplex */
        BMCR_SPEED100|BMCR_FULLDPLX,     /* 4 : 100bt full duplex */
        CAS_BMCR_SPEED1000|BMCR_FULLDPLX /* 5 : 1000bt full duplex */
};
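
/* Usage sketch (illustrative, not part of the original sources):
 * the link_mode module parameter indexes the table above, so e.g.
 *
 *      modprobe cassini link_mode=4 cassini_debug=-1
 *
 * would request 100bt full duplex with the default debug mask.
 */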

static struct pci_device_id cas_pci_tbl[] __devinitdata = {
        { PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_CASSINI,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SATURN,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { 0, }
};

MODULE_DEVICE_TABLE(pci, cas_pci_tbl);

static void cas_set_link_modes(struct cas *cp);

static inline void cas_lock_tx(struct cas *cp)
{
        int i;

        for (i = 0; i < N_TX_RINGS; i++)
                spin_lock(&cp->tx_lock[i]);
}

static inline void cas_lock_all(struct cas *cp)
{
        spin_lock_irq(&cp->lock);
        cas_lock_tx(cp);
}

/* WTZ: QA was finding deadlock problems with the previous
 * versions after long test runs with multiple cards per machine.
 * See if replacing cas_lock_all with safer versions helps. The
 * symptoms QA is reporting match those we'd expect if interrupts
 * aren't being properly restored, and we fixed a previous deadlock
 * with similar symptoms by using save/restore versions in other
 * code.
 */
#define cas_lock_all_save(cp, flags) \
do { \
        struct cas *xxxcp = (cp); \
        spin_lock_irqsave(&xxxcp->lock, flags); \
        cas_lock_tx(xxxcp); \
} while (0)

static inline void cas_unlock_tx(struct cas *cp)
{
        int i;

        for (i = N_TX_RINGS; i > 0; i--)
                spin_unlock(&cp->tx_lock[i - 1]);
}

static inline void cas_unlock_all(struct cas *cp)
{
        cas_unlock_tx(cp);
        spin_unlock_irq(&cp->lock);
}

#define cas_unlock_all_restore(cp, flags) \
do { \
        struct cas *xxxcp = (cp); \
        cas_unlock_tx(xxxcp); \
        spin_unlock_irqrestore(&xxxcp->lock, flags); \
} while (0)
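
/* Usage sketch (illustrative): the save/restore pair brackets code
 * that must hold cp->lock and every TX ring lock with IRQs off:
 *
 *      unsigned long flags;
 *
 *      cas_lock_all_save(cp, flags);
 *      ... touch state shared with the interrupt path ...
 *      cas_unlock_all_restore(cp, flags);
 */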

static void cas_disable_irq(struct cas *cp, const int ring)
{
        /* Make sure we won't get any more interrupts */
        if (ring == 0) {
                writel(0xFFFFFFFF, cp->regs + REG_INTR_MASK);
                return;
        }

        /* disable completion interrupts and selectively mask */
        if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
                switch (ring) {
#if defined (USE_PCI_INTB) || defined(USE_PCI_INTC) || defined(USE_PCI_INTD)
#ifdef USE_PCI_INTB
                case 1:
#endif
#ifdef USE_PCI_INTC
                case 2:
#endif
#ifdef USE_PCI_INTD
                case 3:
#endif
                        writel(INTRN_MASK_CLEAR_ALL | INTRN_MASK_RX_EN,
                               cp->regs + REG_PLUS_INTRN_MASK(ring));
                        break;
#endif
                default:
                        writel(INTRN_MASK_CLEAR_ALL, cp->regs +
                               REG_PLUS_INTRN_MASK(ring));
                        break;
                }
        }
}

static inline void cas_mask_intr(struct cas *cp)
{
        int i;

        for (i = 0; i < N_RX_COMP_RINGS; i++)
                cas_disable_irq(cp, i);
}

static void cas_enable_irq(struct cas *cp, const int ring)
{
        if (ring == 0) { /* all but TX_DONE */
                writel(INTR_TX_DONE, cp->regs + REG_INTR_MASK);
                return;
        }

        if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
                switch (ring) {
#if defined (USE_PCI_INTB) || defined(USE_PCI_INTC) || defined(USE_PCI_INTD)
#ifdef USE_PCI_INTB
                case 1:
#endif
#ifdef USE_PCI_INTC
                case 2:
#endif
#ifdef USE_PCI_INTD
                case 3:
#endif
                        writel(INTRN_MASK_RX_EN, cp->regs +
                               REG_PLUS_INTRN_MASK(ring));
                        break;
#endif
                default:
                        break;
                }
        }
}

static inline void cas_unmask_intr(struct cas *cp)
{
        int i;

        for (i = 0; i < N_RX_COMP_RINGS; i++)
                cas_enable_irq(cp, i);
}

static inline void cas_entropy_gather(struct cas *cp)
{
#ifdef USE_ENTROPY_DEV
        if ((cp->cas_flags & CAS_FLAG_ENTROPY_DEV) == 0)
                return;

        batch_entropy_store(readl(cp->regs + REG_ENTROPY_IV),
                            readl(cp->regs + REG_ENTROPY_IV),
                            sizeof(uint64_t)*8);
#endif
}

static inline void cas_entropy_reset(struct cas *cp)
{
#ifdef USE_ENTROPY_DEV
        if ((cp->cas_flags & CAS_FLAG_ENTROPY_DEV) == 0)
                return;

        writel(BIM_LOCAL_DEV_PAD | BIM_LOCAL_DEV_PROM | BIM_LOCAL_DEV_EXT,
               cp->regs + REG_BIM_LOCAL_DEV_EN);
        writeb(ENTROPY_RESET_STC_MODE, cp->regs + REG_ENTROPY_RESET);
        writeb(0x55, cp->regs + REG_ENTROPY_RAND_REG);

        /* if we read back 0x0, we don't have an entropy device */
        if (readb(cp->regs + REG_ENTROPY_RAND_REG) == 0)
                cp->cas_flags &= ~CAS_FLAG_ENTROPY_DEV;
#endif
}

/* access to the phy. the following assumes that we've initialized the MIF to
 * be in frame rather than bit-bang mode
 */
static u16 cas_phy_read(struct cas *cp, int reg)
{
        u32 cmd;
        int limit = STOP_TRIES_PHY;

        cmd = MIF_FRAME_ST | MIF_FRAME_OP_READ;
        cmd |= CAS_BASE(MIF_FRAME_PHY_ADDR, cp->phy_addr);
        cmd |= CAS_BASE(MIF_FRAME_REG_ADDR, reg);
        cmd |= MIF_FRAME_TURN_AROUND_MSB;
        writel(cmd, cp->regs + REG_MIF_FRAME);

        /* poll for completion */
        while (limit-- > 0) {
                udelay(10);
                cmd = readl(cp->regs + REG_MIF_FRAME);
                if (cmd & MIF_FRAME_TURN_AROUND_LSB)
                        return (cmd & MIF_FRAME_DATA_MASK);
        }
        return 0xFFFF; /* -1 */
}

static int cas_phy_write(struct cas *cp, int reg, u16 val)
{
        int limit = STOP_TRIES_PHY;
        u32 cmd;

        cmd = MIF_FRAME_ST | MIF_FRAME_OP_WRITE;
        cmd |= CAS_BASE(MIF_FRAME_PHY_ADDR, cp->phy_addr);
        cmd |= CAS_BASE(MIF_FRAME_REG_ADDR, reg);
        cmd |= MIF_FRAME_TURN_AROUND_MSB;
        cmd |= val & MIF_FRAME_DATA_MASK;
        writel(cmd, cp->regs + REG_MIF_FRAME);

        /* poll for completion */
        while (limit-- > 0) {
                udelay(10);
                cmd = readl(cp->regs + REG_MIF_FRAME);
                if (cmd & MIF_FRAME_TURN_AROUND_LSB)
                        return 0;
        }
        return -1;
}

static void cas_phy_powerup(struct cas *cp)
{
        u16 ctl = cas_phy_read(cp, MII_BMCR);

        if ((ctl & BMCR_PDOWN) == 0)
                return;
        ctl &= ~BMCR_PDOWN;
        cas_phy_write(cp, MII_BMCR, ctl);
}

static void cas_phy_powerdown(struct cas *cp)
{
        u16 ctl = cas_phy_read(cp, MII_BMCR);

        if (ctl & BMCR_PDOWN)
                return;
        ctl |= BMCR_PDOWN;
        cas_phy_write(cp, MII_BMCR, ctl);
}

/* cp->lock held. note: the last put_page will free the buffer */
static int cas_page_free(struct cas *cp, cas_page_t *page)
{
        pci_unmap_page(cp->pdev, page->dma_addr, cp->page_size,
                       PCI_DMA_FROMDEVICE);
        __free_pages(page->buffer, cp->page_order);
        kfree(page);
        return 0;
}

#ifdef RX_COUNT_BUFFERS
#define RX_USED_ADD(x, y)       ((x)->used += (y))
#define RX_USED_SET(x, y)       ((x)->used  = (y))
#else
#define RX_USED_ADD(x, y)
#define RX_USED_SET(x, y)
#endif

/* local page allocation routines for the receive buffers. jumbo pages
 * require at least 8K contiguous and 8K aligned buffers.
 */
static cas_page_t *cas_page_alloc(struct cas *cp, const gfp_t flags)
{
        cas_page_t *page;

        page = kmalloc(sizeof(cas_page_t), flags);
        if (!page)
                return NULL;

        INIT_LIST_HEAD(&page->list);
        RX_USED_SET(page, 0);
        page->buffer = alloc_pages(flags, cp->page_order);
        if (!page->buffer)
                goto page_err;
        page->dma_addr = pci_map_page(cp->pdev, page->buffer, 0,
                                      cp->page_size, PCI_DMA_FROMDEVICE);
        return page;

page_err:
        kfree(page);
        return NULL;
}

/* initialize spare pool of rx buffers, but allocate during the open */
static void cas_spare_init(struct cas *cp)
{
        spin_lock(&cp->rx_inuse_lock);
        INIT_LIST_HEAD(&cp->rx_inuse_list);
        spin_unlock(&cp->rx_inuse_lock);

        spin_lock(&cp->rx_spare_lock);
        INIT_LIST_HEAD(&cp->rx_spare_list);
        cp->rx_spares_needed = RX_SPARE_COUNT;
        spin_unlock(&cp->rx_spare_lock);
}

/* used on close. free all the spare buffers. */
static void cas_spare_free(struct cas *cp)
{
        struct list_head list, *elem, *tmp;

        /* free spare buffers */
        INIT_LIST_HEAD(&list);
        spin_lock(&cp->rx_spare_lock);
        list_splice_init(&cp->rx_spare_list, &list);
        spin_unlock(&cp->rx_spare_lock);
        list_for_each_safe(elem, tmp, &list) {
                cas_page_free(cp, list_entry(elem, cas_page_t, list));
        }

        INIT_LIST_HEAD(&list);
#if 1
        /*
         * Looks like Adrian had protected this with a different
         * lock than used everywhere else to manipulate this list.
         */
        spin_lock(&cp->rx_inuse_lock);
        list_splice_init(&cp->rx_inuse_list, &list);
        spin_unlock(&cp->rx_inuse_lock);
#else
        spin_lock(&cp->rx_spare_lock);
        list_splice_init(&cp->rx_inuse_list, &list);
        spin_unlock(&cp->rx_spare_lock);
#endif
        list_for_each_safe(elem, tmp, &list) {
                cas_page_free(cp, list_entry(elem, cas_page_t, list));
        }
}

/* replenish spares if needed */
static void cas_spare_recover(struct cas *cp, const gfp_t flags)
{
        struct list_head list, *elem, *tmp;
        int needed, i;

        /* check inuse list. if we don't need any more free buffers,
         * just free it
         */

        /* make a local copy of the list */
        INIT_LIST_HEAD(&list);
        spin_lock(&cp->rx_inuse_lock);
        list_splice_init(&cp->rx_inuse_list, &list);
        spin_unlock(&cp->rx_inuse_lock);

        list_for_each_safe(elem, tmp, &list) {
                cas_page_t *page = list_entry(elem, cas_page_t, list);

                /*
                 * With the lockless pagecache, cassini buffering scheme gets
                 * slightly less accurate: we might find that a page has an
                 * elevated reference count here, due to a speculative ref,
                 * and skip it as in-use. Ideally we would be able to reclaim
                 * it. However this would be such a rare case, it doesn't
                 * matter too much as we should pick it up the next time round.
                 *
                 * Importantly, if we find that the page has a refcount of 1
                 * here (our refcount), then we know it is definitely not inuse
                 * so we can reuse it.
                 */
                if (page_count(page->buffer) > 1)
                        continue;

                list_del(elem);
                spin_lock(&cp->rx_spare_lock);
                if (cp->rx_spares_needed > 0) {
                        list_add(elem, &cp->rx_spare_list);
                        cp->rx_spares_needed--;
                        spin_unlock(&cp->rx_spare_lock);
                } else {
                        spin_unlock(&cp->rx_spare_lock);
                        cas_page_free(cp, page);
                }
        }

        /* put any inuse buffers back on the list */
        if (!list_empty(&list)) {
                spin_lock(&cp->rx_inuse_lock);
                list_splice(&list, &cp->rx_inuse_list);
                spin_unlock(&cp->rx_inuse_lock);
        }

        spin_lock(&cp->rx_spare_lock);
        needed = cp->rx_spares_needed;
        spin_unlock(&cp->rx_spare_lock);
        if (!needed)
                return;

        /* we still need spares, so try to allocate some */
        INIT_LIST_HEAD(&list);
        i = 0;
        while (i < needed) {
                cas_page_t *spare = cas_page_alloc(cp, flags);
                if (!spare)
                        break;
                list_add(&spare->list, &list);
                i++;
        }

        spin_lock(&cp->rx_spare_lock);
        list_splice(&list, &cp->rx_spare_list);
        cp->rx_spares_needed -= i;
        spin_unlock(&cp->rx_spare_lock);
}

/* pull a page from the list. */
static cas_page_t *cas_page_dequeue(struct cas *cp)
{
        struct list_head *entry;
        int recover;

        spin_lock(&cp->rx_spare_lock);
        if (list_empty(&cp->rx_spare_list)) {
                /* try to do a quick recovery */
                spin_unlock(&cp->rx_spare_lock);
                cas_spare_recover(cp, GFP_ATOMIC);
                spin_lock(&cp->rx_spare_lock);
                if (list_empty(&cp->rx_spare_list)) {
                        if (netif_msg_rx_err(cp))
                                printk(KERN_ERR "%s: no spare buffers "
                                       "available.\n", cp->dev->name);
                        spin_unlock(&cp->rx_spare_lock);
                        return NULL;
                }
        }

        entry = cp->rx_spare_list.next;
        list_del(entry);
        recover = ++cp->rx_spares_needed;
        spin_unlock(&cp->rx_spare_lock);

        /* trigger the timer to do the recovery */
        if ((recover & (RX_SPARE_RECOVER_VAL - 1)) == 0) {
#if 1
                atomic_inc(&cp->reset_task_pending);
                atomic_inc(&cp->reset_task_pending_spare);
                schedule_work(&cp->reset_task);
#else
                atomic_set(&cp->reset_task_pending, CAS_RESET_SPARE);
                schedule_work(&cp->reset_task);
#endif
        }
        return list_entry(entry, cas_page_t, list);
}
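
/* enable or disable MIF polling of the PHY: when enabled, the MIF
 * watches BMSR and raises an interrupt on link-status changes rather
 * than the driver reading the PHY by hand.
 */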
static void cas_mif_poll(struct cas *cp, const int enable)
{
        u32 cfg;

        cfg  = readl(cp->regs + REG_MIF_CFG);
        cfg &= (MIF_CFG_MDIO_0 | MIF_CFG_MDIO_1);

        if (cp->phy_type & CAS_PHY_MII_MDIO1)
                cfg |= MIF_CFG_PHY_SELECT;

        /* poll and interrupt on link status change. */
        if (enable) {
                cfg |= MIF_CFG_POLL_EN;
                cfg |= CAS_BASE(MIF_CFG_POLL_REG, MII_BMSR);
                cfg |= CAS_BASE(MIF_CFG_POLL_PHY, cp->phy_addr);
        }
        writel((enable) ? ~(BMSR_LSTATUS | BMSR_ANEGCOMPLETE) : 0xFFFF,
               cp->regs + REG_MIF_MASK);
        writel(cfg, cp->regs + REG_MIF_CFG);
}

/* Must be invoked under cp->lock */
static void cas_begin_auto_negotiation(struct cas *cp, struct ethtool_cmd *ep)
{
        u16 ctl;
        int lcntl;
        int changed = 0;
        int oldstate = cp->lstate;
        int link_was_not_down = !(oldstate == link_down);

        /* Setup link parameters */
        if (!ep)
                goto start_aneg;
        lcntl = cp->link_cntl;
        if (ep->autoneg == AUTONEG_ENABLE)
                cp->link_cntl = BMCR_ANENABLE;
        else {
                cp->link_cntl = 0;
                if (ep->speed == SPEED_100)
                        cp->link_cntl |= BMCR_SPEED100;
                else if (ep->speed == SPEED_1000)
                        cp->link_cntl |= CAS_BMCR_SPEED1000;
                if (ep->duplex == DUPLEX_FULL)
                        cp->link_cntl |= BMCR_FULLDPLX;
        }
        changed = (lcntl != cp->link_cntl);

start_aneg:
        if (cp->lstate == link_up) {
                printk(KERN_INFO "%s: PCS link down.\n",
                       cp->dev->name);
        } else {
                if (changed) {
                        printk(KERN_INFO "%s: link configuration changed\n",
                               cp->dev->name);
                }
        }
        cp->lstate = link_down;
        cp->link_transition = LINK_TRANSITION_LINK_DOWN;
        if (!cp->hw_running)
                return;

        /*
         * WTZ: If the old state was link_up, we turn off the carrier
         * to replicate everything we do elsewhere on a link-down
         * event when we were already in a link-up state..
         */
        if (oldstate == link_up)
                netif_carrier_off(cp->dev);
        if (changed && link_was_not_down) {
                /*
                 * WTZ: This branch will simply schedule a full reset after
                 * we explicitly changed link modes in an ioctl. See if this
                 * fixes the link-problems we were having for forced mode.
                 */
                atomic_inc(&cp->reset_task_pending);
                atomic_inc(&cp->reset_task_pending_all);
                schedule_work(&cp->reset_task);
                cp->timer_ticks = 0;
                mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT);
                return;
        }

        if (cp->phy_type & CAS_PHY_SERDES) {
                u32 val = readl(cp->regs + REG_PCS_MII_CTRL);

                if (cp->link_cntl & BMCR_ANENABLE) {
                        val |= (PCS_MII_RESTART_AUTONEG | PCS_MII_AUTONEG_EN);
                        cp->lstate = link_aneg;
                } else {
                        if (cp->link_cntl & BMCR_FULLDPLX)
                                val |= PCS_MII_CTRL_DUPLEX;
                        val &= ~PCS_MII_AUTONEG_EN;
                        cp->lstate = link_force_ok;
                }
                cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
                writel(val, cp->regs + REG_PCS_MII_CTRL);

        } else {
                cas_mif_poll(cp, 0);
                ctl = cas_phy_read(cp, MII_BMCR);
                ctl &= ~(BMCR_FULLDPLX | BMCR_SPEED100 |
                         CAS_BMCR_SPEED1000 | BMCR_ANENABLE);
                ctl |= cp->link_cntl;
                if (ctl & BMCR_ANENABLE) {
                        ctl |= BMCR_ANRESTART;
                        cp->lstate = link_aneg;
                } else {
                        cp->lstate = link_force_ok;
                }
                cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
                cas_phy_write(cp, MII_BMCR, ctl);
                cas_mif_poll(cp, 1);
        }

        cp->timer_ticks = 0;
        mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT);
}

/* Must be invoked under cp->lock. */
static int cas_reset_mii_phy(struct cas *cp)
{
        int limit = STOP_TRIES_PHY;
        u16 val;

        cas_phy_write(cp, MII_BMCR, BMCR_RESET);
        udelay(100);
        while (--limit) {
                val = cas_phy_read(cp, MII_BMCR);
                if ((val & BMCR_RESET) == 0)
                        break;
                udelay(10);
        }
        return (limit <= 0);
}
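
/* fetch the Saturn PHY firmware image ("sun/cassini.bin") through the
 * firmware loader and stash a copy in cp->fw_data for the download
 * performed by cas_saturn_firmware_load() below.
 */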
static int cas_saturn_firmware_init(struct cas *cp)
{
        const struct firmware *fw;
        const char fw_name[] = "sun/cassini.bin";
        int err;

        if (PHY_NS_DP83065 != cp->phy_id)
                return 0;

        err = request_firmware(&fw, fw_name, &cp->pdev->dev);
        if (err) {
                printk(KERN_ERR "cassini: Failed to load firmware \"%s\"\n",
                       fw_name);
                return err;
        }
        if (fw->size < 2) {
                printk(KERN_ERR "cassini: bogus length %zu in \"%s\"\n",
                       fw->size, fw_name);
                err = -EINVAL;
                goto out;
        }
        cp->fw_load_addr = fw->data[1] << 8 | fw->data[0];
        cp->fw_size = fw->size - 2;
        cp->fw_data = vmalloc(cp->fw_size);
        if (!cp->fw_data) {
                err = -ENOMEM;
                printk(KERN_ERR "cassini: \"%s\" Failed %d\n", fw_name, err);
                goto out;
        }
        memcpy(cp->fw_data, &fw->data[2], cp->fw_size);
out:
        release_firmware(fw);
        return err;
}
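
/* push the fetched image into the DP83065 over MII: switch the PHY
 * into expanded memory access mode, write the image byte by byte
 * through the REGE/REGD window, then flip the enable bit.
 */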
static void cas_saturn_firmware_load(struct cas *cp)
{
        int i;

        cas_phy_powerdown(cp);

        /* expanded memory access mode */
        cas_phy_write(cp, DP83065_MII_MEM, 0x0);

        /* pointer configuration for new firmware */
        cas_phy_write(cp, DP83065_MII_REGE, 0x8ff9);
        cas_phy_write(cp, DP83065_MII_REGD, 0xbd);
        cas_phy_write(cp, DP83065_MII_REGE, 0x8ffa);
        cas_phy_write(cp, DP83065_MII_REGD, 0x82);
        cas_phy_write(cp, DP83065_MII_REGE, 0x8ffb);
        cas_phy_write(cp, DP83065_MII_REGD, 0x0);
        cas_phy_write(cp, DP83065_MII_REGE, 0x8ffc);
        cas_phy_write(cp, DP83065_MII_REGD, 0x39);

        /* download new firmware */
        cas_phy_write(cp, DP83065_MII_MEM, 0x1);
        cas_phy_write(cp, DP83065_MII_REGE, cp->fw_load_addr);
        for (i = 0; i < cp->fw_size; i++)
                cas_phy_write(cp, DP83065_MII_REGD, cp->fw_data[i]);

        /* enable firmware */
        cas_phy_write(cp, DP83065_MII_REGE, 0x8ff8);
        cas_phy_write(cp, DP83065_MII_REGD, 0x1);
}

/* phy initialization */
static void cas_phy_init(struct cas *cp)
{
        u16 val;

        /* if we're in MII/GMII mode, set up phy */
        if (CAS_PHY_MII(cp->phy_type)) {
                writel(PCS_DATAPATH_MODE_MII,
                       cp->regs + REG_PCS_DATAPATH_MODE);

                cas_mif_poll(cp, 0);
                cas_reset_mii_phy(cp); /* take out of isolate mode */

                if (PHY_LUCENT_B0 == cp->phy_id) {
                        /* workaround link up/down issue with lucent */
                        cas_phy_write(cp, LUCENT_MII_REG, 0x8000);
                        cas_phy_write(cp, MII_BMCR, 0x00f1);
                        cas_phy_write(cp, LUCENT_MII_REG, 0x0);

                } else if (PHY_BROADCOM_B0 == (cp->phy_id & 0xFFFFFFFC)) {
                        /* workarounds for broadcom phy */
                        cas_phy_write(cp, BROADCOM_MII_REG8, 0x0C20);
                        cas_phy_write(cp, BROADCOM_MII_REG7, 0x0012);
                        cas_phy_write(cp, BROADCOM_MII_REG5, 0x1804);
                        cas_phy_write(cp, BROADCOM_MII_REG7, 0x0013);
                        cas_phy_write(cp, BROADCOM_MII_REG5, 0x1204);
                        cas_phy_write(cp, BROADCOM_MII_REG7, 0x8006);
                        cas_phy_write(cp, BROADCOM_MII_REG5, 0x0132);
                        cas_phy_write(cp, BROADCOM_MII_REG7, 0x8006);
                        cas_phy_write(cp, BROADCOM_MII_REG5, 0x0232);
                        cas_phy_write(cp, BROADCOM_MII_REG7, 0x201F);
                        cas_phy_write(cp, BROADCOM_MII_REG5, 0x0A20);

                } else if (PHY_BROADCOM_5411 == cp->phy_id) {
                        val = cas_phy_read(cp, BROADCOM_MII_REG4);
                        val = cas_phy_read(cp, BROADCOM_MII_REG4);
                        if (val & 0x0080) {
                                /* link workaround */
                                cas_phy_write(cp, BROADCOM_MII_REG4,
                                              val & ~0x0080);
                        }

                } else if (cp->cas_flags & CAS_FLAG_SATURN) {
                        writel((cp->phy_type & CAS_PHY_MII_MDIO0) ?
                               SATURN_PCFG_FSI : 0x0,
                               cp->regs + REG_SATURN_PCFG);

                        /* load firmware to address 10Mbps auto-negotiation
                         * issue. NOTE: this will need to be changed if the
                         * default firmware gets fixed.
                         */
                        if (PHY_NS_DP83065 == cp->phy_id) {
                                cas_saturn_firmware_load(cp);
                        }
                        cas_phy_powerup(cp);
                }

                /* advertise capabilities */
                val = cas_phy_read(cp, MII_BMCR);
                val &= ~BMCR_ANENABLE;
                cas_phy_write(cp, MII_BMCR, val);
                udelay(10);

                cas_phy_write(cp, MII_ADVERTISE,
                              cas_phy_read(cp, MII_ADVERTISE) |
                              (ADVERTISE_10HALF | ADVERTISE_10FULL |
                               ADVERTISE_100HALF | ADVERTISE_100FULL |
                               CAS_ADVERTISE_PAUSE |
                               CAS_ADVERTISE_ASYM_PAUSE));

                if (cp->cas_flags & CAS_FLAG_1000MB_CAP) {
                        /* make sure that we don't advertise half
                         * duplex to avoid a chip issue
                         */
                        val = cas_phy_read(cp, CAS_MII_1000_CTRL);
                        val &= ~CAS_ADVERTISE_1000HALF;
                        val |= CAS_ADVERTISE_1000FULL;
                        cas_phy_write(cp, CAS_MII_1000_CTRL, val);
                }

        } else {
                /* reset pcs for serdes */
                u32 val;
                int limit;

                writel(PCS_DATAPATH_MODE_SERDES,
                       cp->regs + REG_PCS_DATAPATH_MODE);

                /* enable serdes pins on saturn */
                if (cp->cas_flags & CAS_FLAG_SATURN)
                        writel(0, cp->regs + REG_SATURN_PCFG);

                /* Reset PCS unit. */
                val = readl(cp->regs + REG_PCS_MII_CTRL);
                val |= PCS_MII_RESET;
                writel(val, cp->regs + REG_PCS_MII_CTRL);

                limit = STOP_TRIES;
                while (--limit > 0) {
                        udelay(10);
                        if ((readl(cp->regs + REG_PCS_MII_CTRL) &
                             PCS_MII_RESET) == 0)
                                break;
                }
                if (limit <= 0)
                        printk(KERN_WARNING "%s: PCS reset bit would not "
                               "clear [%08x].\n", cp->dev->name,
                               readl(cp->regs + REG_PCS_STATE_MACHINE));

                /* Make sure PCS is disabled while changing advertisement
                 * configuration.
                 */
                writel(0x0, cp->regs + REG_PCS_CFG);

                /* Advertise all capabilities except half-duplex. */
                val = readl(cp->regs + REG_PCS_MII_ADVERT);
                val &= ~PCS_MII_ADVERT_HD;
                val |= (PCS_MII_ADVERT_FD | PCS_MII_ADVERT_SYM_PAUSE |
                        PCS_MII_ADVERT_ASYM_PAUSE);
                writel(val, cp->regs + REG_PCS_MII_ADVERT);

                /* enable PCS */
                writel(PCS_CFG_EN, cp->regs + REG_PCS_CFG);

                /* pcs workaround: enable sync detect */
                writel(PCS_SERDES_CTRL_SYNCD_EN,
                       cp->regs + REG_PCS_SERDES_CTRL);
        }
}
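
/* check PCS link state and walk the driver's link state machine;
 * returns nonzero when the caller should schedule a chip reset.
 */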
static int cas_pcs_link_check(struct cas *cp)
{
        u32 stat, state_machine;
        int retval = 0;

        /* The link status bit latches on zero, so you must
         * read it twice in such a case to see a transition
         * to the link being up.
         */
        stat = readl(cp->regs + REG_PCS_MII_STATUS);
        if ((stat & PCS_MII_STATUS_LINK_STATUS) == 0)
                stat = readl(cp->regs + REG_PCS_MII_STATUS);

        /* The remote-fault indication is only valid
         * when autoneg has completed.
         */
        if ((stat & (PCS_MII_STATUS_AUTONEG_COMP |
                     PCS_MII_STATUS_REMOTE_FAULT)) ==
            (PCS_MII_STATUS_AUTONEG_COMP | PCS_MII_STATUS_REMOTE_FAULT)) {
                if (netif_msg_link(cp))
                        printk(KERN_INFO "%s: PCS RemoteFault\n",
                               cp->dev->name);
        }

        /* work around link detection issue by querying the PCS state
         * machine directly.
         */
        state_machine = readl(cp->regs + REG_PCS_STATE_MACHINE);
        if ((state_machine & PCS_SM_LINK_STATE_MASK) != SM_LINK_STATE_UP) {
                stat &= ~PCS_MII_STATUS_LINK_STATUS;
        } else if (state_machine & PCS_SM_WORD_SYNC_STATE_MASK) {
                stat |= PCS_MII_STATUS_LINK_STATUS;
        }

        if (stat & PCS_MII_STATUS_LINK_STATUS) {
                if (cp->lstate != link_up) {
                        if (cp->opened) {
                                cp->lstate = link_up;
                                cp->link_transition = LINK_TRANSITION_LINK_UP;

                                cas_set_link_modes(cp);
                                netif_carrier_on(cp->dev);
                        }
                }
        } else if (cp->lstate == link_up) {
                cp->lstate = link_down;
                if (link_transition_timeout != 0 &&
                    cp->link_transition != LINK_TRANSITION_REQUESTED_RESET &&
                    !cp->link_transition_jiffies_valid) {
                        /*
                         * force a reset, as a workaround for the
                         * link-failure problem. May want to move this to a
                         * point a bit earlier in the sequence. If we had
                         * generated a reset a short time ago, we'll wait for
                         * the link timer to check the status until a
                         * timer expires (link_transition_jiffies_valid is
                         * true when the timer is running.)  Instead of using
                         * a system timer, we just do a check whenever the
                         * link timer is running - this clears the flag after
                         * a suitable delay.
                         */
                        retval = 1;
                        cp->link_transition = LINK_TRANSITION_REQUESTED_RESET;
                        cp->link_transition_jiffies = jiffies;
                        cp->link_transition_jiffies_valid = 1;
                } else {
                        cp->link_transition = LINK_TRANSITION_ON_FAILURE;
                }
                netif_carrier_off(cp->dev);
                if (cp->opened && netif_msg_link(cp)) {
                        printk(KERN_INFO "%s: PCS link down.\n",
                               cp->dev->name);
                }

                /* Cassini only: if you force a mode, there can be
                 * sync problems on link down. to fix that, the following
                 * things need to be checked:
                 * 1) read serialink state register
                 * 2) read pcs status register to verify link down.
                 * 3) if link down and serial link == 0x03, then you need
                 *    to global reset the chip.
                 */
                if ((cp->cas_flags & CAS_FLAG_REG_PLUS) == 0) {
                        /* should check to see if we're in a forced mode */
                        stat = readl(cp->regs + REG_PCS_SERDES_STATE);
                        if (stat == 0x03)
                                retval = 1;
                }
        } else if (cp->lstate == link_down) {
                if (link_transition_timeout != 0 &&
                    cp->link_transition != LINK_TRANSITION_REQUESTED_RESET &&
                    !cp->link_transition_jiffies_valid) {
                        /* force a reset, as a workaround for the
                         * link-failure problem. May want to move
                         * this to a point a bit earlier in the
                         * sequence.
                         */
                        retval = 1;
                        cp->link_transition = LINK_TRANSITION_REQUESTED_RESET;
                        cp->link_transition_jiffies = jiffies;
                        cp->link_transition_jiffies_valid = 1;
                } else {
                        cp->link_transition = LINK_TRANSITION_STILL_FAILED;
                }
        }

        return retval;
}
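
/* PCS interrupt: only link-change events matter; the real work is
 * done in cas_pcs_link_check().
 */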
static int cas_pcs_interrupt(struct net_device *dev,
                             struct cas *cp, u32 status)
{
        u32 stat = readl(cp->regs + REG_PCS_INTR_STATUS);

        if ((stat & PCS_INTR_STATUS_LINK_CHANGE) == 0)
                return 0;
        return cas_pcs_link_check(cp);
}
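
/* TX MAC interrupt: mostly 16-bit hardware counters rolling over into
 * the stats; underruns and max-packet errors are logged as well.
 */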
static int cas_txmac_interrupt(struct net_device *dev,
                               struct cas *cp, u32 status)
{
        u32 txmac_stat = readl(cp->regs + REG_MAC_TX_STATUS);

        if (!txmac_stat)
                return 0;

        if (netif_msg_intr(cp))
                printk(KERN_DEBUG "%s: txmac interrupt, txmac_stat: 0x%x\n",
                       cp->dev->name, txmac_stat);

        /* Defer timer expiration is quite normal,
         * don't even log the event.
         */
        if ((txmac_stat & MAC_TX_DEFER_TIMER) &&
            !(txmac_stat & ~MAC_TX_DEFER_TIMER))
                return 0;

        spin_lock(&cp->stat_lock[0]);
        if (txmac_stat & MAC_TX_UNDERRUN) {
                printk(KERN_ERR "%s: TX MAC xmit underrun.\n",
                       dev->name);
                cp->net_stats[0].tx_fifo_errors++;
        }

        if (txmac_stat & MAC_TX_MAX_PACKET_ERR) {
                printk(KERN_ERR "%s: TX MAC max packet size error.\n",
                       dev->name);
                cp->net_stats[0].tx_errors++;
        }

        /* The rest are all cases of one of the 16-bit TX
         * counters expiring.
         */
        if (txmac_stat & MAC_TX_COLL_NORMAL)
                cp->net_stats[0].collisions += 0x10000;

        if (txmac_stat & MAC_TX_COLL_EXCESS) {
                cp->net_stats[0].tx_aborted_errors += 0x10000;
                cp->net_stats[0].collisions += 0x10000;
        }

        if (txmac_stat & MAC_TX_COLL_LATE) {
                cp->net_stats[0].tx_aborted_errors += 0x10000;
                cp->net_stats[0].collisions += 0x10000;
        }
        spin_unlock(&cp->stat_lock[0]);

        /* We do not keep track of MAC_TX_COLL_FIRST and
         * MAC_TX_PEAK_ATTEMPTS events.
         */
        return 0;
}
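
/* load the header-parser (HP) firmware program into the chip's
 * instruction RAM, one instruction at a time.
 */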
static void cas_load_firmware(struct cas *cp, cas_hp_inst_t *firmware)
{
        cas_hp_inst_t *inst;
        u32 val;
        int i;

        i = 0;
        while ((inst = firmware) && inst->note) {
                writel(i, cp->regs + REG_HP_INSTR_RAM_ADDR);

                val = CAS_BASE(HP_INSTR_RAM_HI_VAL, inst->val);
                val |= CAS_BASE(HP_INSTR_RAM_HI_MASK, inst->mask);
                writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_HI);

                val = CAS_BASE(HP_INSTR_RAM_MID_OUTARG, inst->outarg >> 10);
                val |= CAS_BASE(HP_INSTR_RAM_MID_OUTOP, inst->outop);
                val |= CAS_BASE(HP_INSTR_RAM_MID_FNEXT, inst->fnext);
                val |= CAS_BASE(HP_INSTR_RAM_MID_FOFF, inst->foff);
                val |= CAS_BASE(HP_INSTR_RAM_MID_SNEXT, inst->snext);
                val |= CAS_BASE(HP_INSTR_RAM_MID_SOFF, inst->soff);
                val |= CAS_BASE(HP_INSTR_RAM_MID_OP, inst->op);
                writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_MID);

                val = CAS_BASE(HP_INSTR_RAM_LOW_OUTMASK, inst->outmask);
                val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTSHIFT, inst->outshift);
                val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTEN, inst->outenab);
                val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTARG, inst->outarg);
                writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_LOW);
                ++firmware;
                ++i;
        }
}
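
/* program the RX DMA engine: ring bases, pause thresholds, interrupt
 * mitigation, page sizing, and (optionally) the header parser.
 */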
static void cas_init_rx_dma(struct cas *cp)
{
        u64 desc_dma = cp->block_dvma;
        u32 val;
        int i, size;

        /* rx free descriptors */
        val = CAS_BASE(RX_CFG_SWIVEL, RX_SWIVEL_OFF_VAL);
        val |= CAS_BASE(RX_CFG_DESC_RING, RX_DESC_RINGN_INDEX(0));
        val |= CAS_BASE(RX_CFG_COMP_RING, RX_COMP_RINGN_INDEX(0));
        if ((N_RX_DESC_RINGS > 1) &&
            (cp->cas_flags & CAS_FLAG_REG_PLUS))  /* do desc 2 */
                val |= CAS_BASE(RX_CFG_DESC_RING1, RX_DESC_RINGN_INDEX(1));
        writel(val, cp->regs + REG_RX_CFG);

        val = (unsigned long) cp->init_rxds[0] -
                (unsigned long) cp->init_block;
        writel((desc_dma + val) >> 32, cp->regs + REG_RX_DB_HI);
        writel((desc_dma + val) & 0xffffffff, cp->regs + REG_RX_DB_LOW);
        writel(RX_DESC_RINGN_SIZE(0) - 4, cp->regs + REG_RX_KICK);

        if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
                /* rx desc 2 is for IPSEC packets. however,
                 * we don't use it for that purpose.
                 */
                val = (unsigned long) cp->init_rxds[1] -
                        (unsigned long) cp->init_block;
                writel((desc_dma + val) >> 32, cp->regs + REG_PLUS_RX_DB1_HI);
                writel((desc_dma + val) & 0xffffffff, cp->regs +
                       REG_PLUS_RX_DB1_LOW);
                writel(RX_DESC_RINGN_SIZE(1) - 4, cp->regs +
                       REG_PLUS_RX_KICK1);
        }

        /* rx completion registers */
        val = (unsigned long) cp->init_rxcs[0] -
                (unsigned long) cp->init_block;
        writel((desc_dma + val) >> 32, cp->regs + REG_RX_CB_HI);
        writel((desc_dma + val) & 0xffffffff, cp->regs + REG_RX_CB_LOW);

        if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
                /* rx comp 2-4 */
                for (i = 1; i < MAX_RX_COMP_RINGS; i++) {
                        val = (unsigned long) cp->init_rxcs[i] -
                                (unsigned long) cp->init_block;
                        writel((desc_dma + val) >> 32, cp->regs +
                               REG_PLUS_RX_CBN_HI(i));
                        writel((desc_dma + val) & 0xffffffff, cp->regs +
                               REG_PLUS_RX_CBN_LOW(i));
                }
        }

        /* read selective clear regs to prevent spurious interrupts
         * on reset because complete == kick.
         * selective clear set up to prevent interrupts on resets
         */
        readl(cp->regs + REG_INTR_STATUS_ALIAS);
        writel(INTR_RX_DONE | INTR_RX_BUF_UNAVAIL, cp->regs + REG_ALIAS_CLEAR);
        if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
                for (i = 1; i < N_RX_COMP_RINGS; i++)
                        readl(cp->regs + REG_PLUS_INTRN_STATUS_ALIAS(i));

                /* 2 is different from 3 and 4 */
                if (N_RX_COMP_RINGS > 1)
                        writel(INTR_RX_DONE_ALT | INTR_RX_BUF_UNAVAIL_1,
                               cp->regs + REG_PLUS_ALIASN_CLEAR(1));

                for (i = 2; i < N_RX_COMP_RINGS; i++)
                        writel(INTR_RX_DONE_ALT,
                               cp->regs + REG_PLUS_ALIASN_CLEAR(i));
        }

        /* set up pause thresholds */
        val = CAS_BASE(RX_PAUSE_THRESH_OFF,
                       cp->rx_pause_off / RX_PAUSE_THRESH_QUANTUM);
        val |= CAS_BASE(RX_PAUSE_THRESH_ON,
                        cp->rx_pause_on / RX_PAUSE_THRESH_QUANTUM);
        writel(val, cp->regs + REG_RX_PAUSE_THRESH);

        /* zero out dma reassembly buffers */
        for (i = 0; i < 64; i++) {
                writel(i, cp->regs + REG_RX_TABLE_ADDR);
                writel(0x0, cp->regs + REG_RX_TABLE_DATA_LOW);
                writel(0x0, cp->regs + REG_RX_TABLE_DATA_MID);
                writel(0x0, cp->regs + REG_RX_TABLE_DATA_HI);
        }

        /* make sure address register is 0 for normal operation */
        writel(0x0, cp->regs + REG_RX_CTRL_FIFO_ADDR);
        writel(0x0, cp->regs + REG_RX_IPP_FIFO_ADDR);

        /* interrupt mitigation */
#ifdef USE_RX_BLANK
        val = CAS_BASE(RX_BLANK_INTR_TIME, RX_BLANK_INTR_TIME_VAL);
        val |= CAS_BASE(RX_BLANK_INTR_PKT, RX_BLANK_INTR_PKT_VAL);
        writel(val, cp->regs + REG_RX_BLANK);
#else
        writel(0x0, cp->regs + REG_RX_BLANK);
#endif

        /* interrupt generation as a function of low water marks for
         * free desc and completion entries. these are used to trigger
         * housekeeping for rx descs. we don't use the free interrupt
         * as it's not very useful
         */
        /* val = CAS_BASE(RX_AE_THRESH_FREE, RX_AE_FREEN_VAL(0)); */
        val = CAS_BASE(RX_AE_THRESH_COMP, RX_AE_COMP_VAL);
        writel(val, cp->regs + REG_RX_AE_THRESH);
        if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
                val = CAS_BASE(RX_AE1_THRESH_FREE, RX_AE_FREEN_VAL(1));
                writel(val, cp->regs + REG_PLUS_RX_AE1_THRESH);
        }

        /* Random early detect registers. useful for congestion avoidance.
         * this should be tunable.
         */
        writel(0x0, cp->regs + REG_RX_RED);

        /* receive page sizes. default == 2K (0x800) */
        val = 0;
        if (cp->page_size == 0x1000)
                val = 0x1;
        else if (cp->page_size == 0x2000)
                val = 0x2;
        else if (cp->page_size == 0x4000)
                val = 0x3;

        /* round mtu + offset. constrain to page size. */
        size = cp->dev->mtu + 64;
        if (size > cp->page_size)
                size = cp->page_size;

        if (size <= 0x400)
                i = 0x0;
        else if (size <= 0x800)
                i = 0x1;
        else if (size <= 0x1000)
                i = 0x2;
        else
                i = 0x3;

        cp->mtu_stride = 1 << (i + 10);
        val = CAS_BASE(RX_PAGE_SIZE, val);
        val |= CAS_BASE(RX_PAGE_SIZE_MTU_STRIDE, i);
        val |= CAS_BASE(RX_PAGE_SIZE_MTU_COUNT, cp->page_size >> (i + 10));
        val |= CAS_BASE(RX_PAGE_SIZE_MTU_OFF, 0x1);
        writel(val, cp->regs + REG_RX_PAGE_SIZE);

        /* enable the header parser if desired */
        if (CAS_HP_FIRMWARE == cas_prog_null)
                return;

        val = CAS_BASE(HP_CFG_NUM_CPU, CAS_NCPUS > 63 ? 0 : CAS_NCPUS);
        val |= HP_CFG_PARSE_EN | HP_CFG_SYN_INC_MASK;
        val |= CAS_BASE(HP_CFG_TCP_THRESH, HP_TCP_THRESH_VAL);
        writel(val, cp->regs + REG_HP_CFG);
}

static inline void cas_rxc_init(struct cas_rx_comp *rxc)
{
        memset(rxc, 0, sizeof(*rxc));
        rxc->word4 = cpu_to_le64(RX_COMP4_ZERO);
}

/* NOTE: we use the ENC RX DESC ring for spares. the rx_page[0,1]
 * flipping is protected by the fact that the chip will not
 * hand back the same page index while it's being processed.
 */
static inline cas_page_t *cas_page_spare(struct cas *cp, const int index)
{
        cas_page_t *page = cp->rx_pages[1][index];
        cas_page_t *new;

        if (page_count(page->buffer) == 1)
                return page;

        new = cas_page_dequeue(cp);
        if (new) {
                spin_lock(&cp->rx_inuse_lock);
                list_add(&page->list, &cp->rx_inuse_list);
                spin_unlock(&cp->rx_inuse_lock);
        }
        return new;
}

/* this needs to be changed if we actually use the ENC RX DESC ring */
static cas_page_t *cas_page_swap(struct cas *cp, const int ring,
                                 const int index)
{
        cas_page_t **page0 = cp->rx_pages[0];
        cas_page_t **page1 = cp->rx_pages[1];

        /* swap if buffer is in use */
        if (page_count(page0[index]->buffer) > 1) {
                cas_page_t *new = cas_page_spare(cp, index);
                if (new) {
                        page1[index] = page0[index];
                        page0[index] = new;
                }
        }
        RX_USED_SET(page0[index], 0);
        return page0[index];
}

static void cas_clean_rxds(struct cas *cp)
{
        /* only clean ring 0 as ring 1 is used for spare buffers */
        struct cas_rx_desc *rxd = cp->init_rxds[0];
        int i, size;

        /* release all rx flows */
        for (i = 0; i < N_RX_FLOWS; i++) {
                struct sk_buff *skb;
                while ((skb = __skb_dequeue(&cp->rx_flows[i]))) {
                        cas_skb_release(skb);
                }
        }

        /* initialize descriptors */
        size = RX_DESC_RINGN_SIZE(0);
        for (i = 0; i < size; i++) {
                cas_page_t *page = cas_page_swap(cp, 0, i);
                rxd[i].buffer = cpu_to_le64(page->dma_addr);
                rxd[i].index = cpu_to_le64(CAS_BASE(RX_INDEX_NUM, i) |
                                           CAS_BASE(RX_INDEX_RING, 0));
        }

        cp->rx_old[0] = RX_DESC_RINGN_SIZE(0) - 4;
        cp->rx_last[0] = 0;
        cp->cas_flags &= ~CAS_FLAG_RXD_POST(0);
}
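
/* reinitialize every RX completion-ring entry so the driver owns
 * them all again.
 */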
static void cas_clean_rxcs(struct cas *cp)
{
        int i, j;

        /* take ownership of rx comp descriptors */
        memset(cp->rx_cur, 0, sizeof(*cp->rx_cur)*N_RX_COMP_RINGS);
        memset(cp->rx_new, 0, sizeof(*cp->rx_new)*N_RX_COMP_RINGS);
        for (i = 0; i < N_RX_COMP_RINGS; i++) {
                struct cas_rx_comp *rxc = cp->init_rxcs[i];
                for (j = 0; j < RX_COMP_RINGN_SIZE(i); j++) {
                        cas_rxc_init(rxc + j);
                }
        }
}

/* When we get a RX fifo overflow, the RX unit is probably hung
 * so we do the following.
 *
 * If any part of the reset goes wrong, we return 1 and that causes the
 * whole chip to be reset.
 */
static int cas_rxmac_reset(struct cas *cp)
{
        struct net_device *dev = cp->dev;
        int limit;
        u32 val;

        /* First, reset MAC RX. */
        writel(cp->mac_rx_cfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
        for (limit = 0; limit < STOP_TRIES; limit++) {
                if (!(readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_EN))
                        break;
                udelay(10);
        }
        if (limit == STOP_TRIES) {
                printk(KERN_ERR "%s: RX MAC will not disable, resetting whole "
                       "chip.\n", dev->name);
                return 1;
        }

        /* Second, disable RX DMA. */
        writel(0, cp->regs + REG_RX_CFG);
        for (limit = 0; limit < STOP_TRIES; limit++) {
                if (!(readl(cp->regs + REG_RX_CFG) & RX_CFG_DMA_EN))
                        break;
                udelay(10);
        }
        if (limit == STOP_TRIES) {
                printk(KERN_ERR "%s: RX DMA will not disable, resetting whole "
                       "chip.\n", dev->name);
                return 1;
        }

        mdelay(5);

        /* Execute RX reset command. */
        writel(SW_RESET_RX, cp->regs + REG_SW_RESET);
        for (limit = 0; limit < STOP_TRIES; limit++) {
                if (!(readl(cp->regs + REG_SW_RESET) & SW_RESET_RX))
                        break;
                udelay(10);
        }
        if (limit == STOP_TRIES) {
                printk(KERN_ERR "%s: RX reset command will not execute, "
                       "resetting whole chip.\n", dev->name);
                return 1;
        }

        /* reset driver rx state */
        cas_clean_rxds(cp);
        cas_clean_rxcs(cp);

        /* Now, reprogram the rest of RX unit. */
        cas_init_rx_dma(cp);

        /* re-enable */
        val = readl(cp->regs + REG_RX_CFG);
        writel(val | RX_CFG_DMA_EN, cp->regs + REG_RX_CFG);
        writel(MAC_RX_FRAME_RECV, cp->regs + REG_MAC_RX_MASK);
        val = readl(cp->regs + REG_MAC_RX_CFG);
        writel(val | MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
        return 0;
}

static int cas_rxmac_interrupt(struct net_device *dev, struct cas *cp,
                               u32 status)
{
        u32 stat = readl(cp->regs + REG_MAC_RX_STATUS);

        if (!stat)
                return 0;

        if (netif_msg_intr(cp))
                printk(KERN_DEBUG "%s: rxmac interrupt, stat: 0x%x\n",
                       cp->dev->name, stat);

        /* these are all rollovers */
        spin_lock(&cp->stat_lock[0]);
        if (stat & MAC_RX_ALIGN_ERR)
                cp->net_stats[0].rx_frame_errors += 0x10000;

        if (stat & MAC_RX_CRC_ERR)
                cp->net_stats[0].rx_crc_errors += 0x10000;

        if (stat & MAC_RX_LEN_ERR)
                cp->net_stats[0].rx_length_errors += 0x10000;

        if (stat & MAC_RX_OVERFLOW) {
                cp->net_stats[0].rx_over_errors++;
                cp->net_stats[0].rx_fifo_errors++;
        }

        /* We do not track MAC_RX_FRAME_COUNT and MAC_RX_VIOL_ERR
         * events.
         */
        spin_unlock(&cp->stat_lock[0]);
        return 0;
}

static int cas_mac_interrupt(struct net_device *dev, struct cas *cp,
                             u32 status)
{
        u32 stat = readl(cp->regs + REG_MAC_CTRL_STATUS);

        if (!stat)
                return 0;

        if (netif_msg_intr(cp))
                printk(KERN_DEBUG "%s: mac interrupt, stat: 0x%x\n",
                       cp->dev->name, stat);

        /* This interrupt is just for pause frame and pause
         * tracking.  It is useful for diagnostics and debug
         * but probably by default we will mask these events.
         */
        if (stat & MAC_CTRL_PAUSE_STATE)
                cp->pause_entered++;

        if (stat & MAC_CTRL_PAUSE_RECEIVED)
                cp->pause_last_time_recvd = (stat >> 16);

        return 0;
}

/* Must be invoked under cp->lock. */
static inline int cas_mdio_link_not_up(struct cas *cp)
{
        u16 val;

        switch (cp->lstate) {
        case link_force_ret:
                if (netif_msg_link(cp))
                        printk(KERN_INFO "%s: Autoneg failed again, keeping"
                               " forced mode\n", cp->dev->name);
                cas_phy_write(cp, MII_BMCR, cp->link_fcntl);
                cp->timer_ticks = 5;
                cp->lstate = link_force_ok;
                cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
                break;

        case link_aneg:
                val = cas_phy_read(cp, MII_BMCR);

                /* Try forced modes. we try things in the following order:
                 * 1000 full -> 100 full/half -> 10 half
                 */
                val &= ~(BMCR_ANRESTART | BMCR_ANENABLE);
                val |= BMCR_FULLDPLX;
                val |= (cp->cas_flags & CAS_FLAG_1000MB_CAP) ?
                        CAS_BMCR_SPEED1000 : BMCR_SPEED100;
                cas_phy_write(cp, MII_BMCR, val);
                cp->timer_ticks = 5;
                cp->lstate = link_force_try;
                cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
                break;

        case link_force_try:
                /* Downgrade from 1000 to 100 to 10 Mbps if necessary. */
                val = cas_phy_read(cp, MII_BMCR);
                cp->timer_ticks = 5;
                if (val & CAS_BMCR_SPEED1000) { /* gigabit */
                        val &= ~CAS_BMCR_SPEED1000;
                        val |= (BMCR_SPEED100 | BMCR_FULLDPLX);
                        cas_phy_write(cp, MII_BMCR, val);
                        break;
                }

                if (val & BMCR_SPEED100) {
                        if (val & BMCR_FULLDPLX) /* fd failed */
                                val &= ~BMCR_FULLDPLX;
                        else { /* 100Mbps failed */
                                val &= ~BMCR_SPEED100;
                        }
                        cas_phy_write(cp, MII_BMCR, val);
                        break;
                }
        default:
                break;
        }
        return 0;
}

/* must be invoked with cp->lock held */
static int cas_mii_link_check(struct cas *cp, const u16 bmsr)
{
        int restart;

        if (bmsr & BMSR_LSTATUS) {
                /* Ok, here we got a link. If we had it due to a forced
                 * fallback, and we were configured for autoneg, we
                 * retry a short autoneg pass. If you know your hub is
                 * broken, use ethtool ;)
                 */
                if ((cp->lstate == link_force_try) &&
                    (cp->link_cntl & BMCR_ANENABLE)) {
                        cp->lstate = link_force_ret;
                        cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
                        cas_mif_poll(cp, 0);
                        cp->link_fcntl = cas_phy_read(cp, MII_BMCR);
                        cp->timer_ticks = 5;
                        if (cp->opened && netif_msg_link(cp))
                                printk(KERN_INFO "%s: Got link after fallback, retrying"
                                       " autoneg once...\n", cp->dev->name);
                        cas_phy_write(cp, MII_BMCR,
                                      cp->link_fcntl | BMCR_ANENABLE |
                                      BMCR_ANRESTART);
                        cas_mif_poll(cp, 1);

                } else if (cp->lstate != link_up) {
                        cp->lstate = link_up;
                        cp->link_transition = LINK_TRANSITION_LINK_UP;

                        if (cp->opened) {
                                cas_set_link_modes(cp);
                                netif_carrier_on(cp->dev);
                        }
                }
                return 0;
        }

        /* link not up. if the link was previously up, we restart the
         * link again.
         */
        restart = 0;
        if (cp->lstate == link_up) {
                cp->lstate = link_down;
                cp->link_transition = LINK_TRANSITION_LINK_DOWN;

                netif_carrier_off(cp->dev);
                if (cp->opened && netif_msg_link(cp))
                        printk(KERN_INFO "%s: Link down\n",
                               cp->dev->name);
                restart = 1;

        } else if (++cp->timer_ticks > 10)
                cas_mdio_link_not_up(cp);

        return restart;
}

static int cas_mif_interrupt(struct net_device *dev, struct cas *cp,
                             u32 status)
{
        u32 stat = readl(cp->regs + REG_MIF_STATUS);
        u16 bmsr;

        /* check for a link change */
        if (CAS_VAL(MIF_STATUS_POLL_STATUS, stat) == 0)
                return 0;

        bmsr = CAS_VAL(MIF_STATUS_POLL_DATA, stat);
        return cas_mii_link_check(cp, bmsr);
}

static int cas_pci_interrupt(struct net_device *dev, struct cas *cp,
                             u32 status)
{
        u32 stat = readl(cp->regs + REG_PCI_ERR_STATUS);

        if (!stat)
                return 0;

        printk(KERN_ERR "%s: PCI error [%04x:%04x] ", dev->name, stat,
               readl(cp->regs + REG_BIM_DIAG));

        /* cassini+ has this reserved */
        if ((stat & PCI_ERR_BADACK) &&
            ((cp->cas_flags & CAS_FLAG_REG_PLUS) == 0))
                printk("<No ACK64# during ABS64 cycle> ");

        if (stat & PCI_ERR_DTRTO)
                printk("<Delayed transaction timeout> ");
        if (stat & PCI_ERR_OTHER)
                printk("<other> ");
        if (stat & PCI_ERR_BIM_DMA_WRITE)
                printk("<BIM DMA 0 write req> ");
        if (stat & PCI_ERR_BIM_DMA_READ)
                printk("<BIM DMA 0 read req> ");
        printk("\n");

        if (stat & PCI_ERR_OTHER) {
                u16 cfg;

                /* Interrogate PCI config space for the
                 * true cause.
                 */
                pci_read_config_word(cp->pdev, PCI_STATUS, &cfg);
                printk(KERN_ERR "%s: Read PCI cfg space status [%04x]\n",
                       dev->name, cfg);
                if (cfg & PCI_STATUS_PARITY)
                        printk(KERN_ERR "%s: PCI parity error detected.\n",
                               dev->name);
                if (cfg & PCI_STATUS_SIG_TARGET_ABORT)
                        printk(KERN_ERR "%s: PCI target abort.\n",
                               dev->name);
                if (cfg & PCI_STATUS_REC_TARGET_ABORT)
                        printk(KERN_ERR "%s: PCI master acks target abort.\n",
                               dev->name);
                if (cfg & PCI_STATUS_REC_MASTER_ABORT)
                        printk(KERN_ERR "%s: PCI master abort.\n", dev->name);
                if (cfg & PCI_STATUS_SIG_SYSTEM_ERROR)
                        printk(KERN_ERR "%s: PCI system error SERR#.\n",
                               dev->name);
                if (cfg & PCI_STATUS_DETECTED_PARITY)
                        printk(KERN_ERR "%s: PCI parity error.\n",
                               dev->name);

                /* Write the error bits back to clear them. */
                cfg &= (PCI_STATUS_PARITY |
                        PCI_STATUS_SIG_TARGET_ABORT |
                        PCI_STATUS_REC_TARGET_ABORT |
                        PCI_STATUS_REC_MASTER_ABORT |
                        PCI_STATUS_SIG_SYSTEM_ERROR |
                        PCI_STATUS_DETECTED_PARITY);
                pci_write_config_word(cp->pdev, PCI_STATUS, cfg);
        }

        /* For all PCI errors, we should reset the chip. */
        return 1;
}

/* All non-normal interrupt conditions get serviced here.
 * Returns non-zero if we should just exit the interrupt
 * handler right now (ie. if we reset the card which invalidates
 * all of the other original irq status bits).
 */
static int cas_abnormal_irq(struct net_device *dev, struct cas *cp,
                            u32 status)
{
        if (status & INTR_RX_TAG_ERROR) {
                /* corrupt RX tag framing */
                if (netif_msg_rx_err(cp))
                        printk(KERN_DEBUG "%s: corrupt rx tag framing\n",
                               cp->dev->name);
                spin_lock(&cp->stat_lock[0]);
                cp->net_stats[0].rx_errors++;
                spin_unlock(&cp->stat_lock[0]);
                goto do_reset;
        }

        if (status & INTR_RX_LEN_MISMATCH) {
                /* length mismatch. */
                if (netif_msg_rx_err(cp))
                        printk(KERN_DEBUG "%s: length mismatch for rx frame\n",
                               cp->dev->name);
                spin_lock(&cp->stat_lock[0]);
                cp->net_stats[0].rx_errors++;
                spin_unlock(&cp->stat_lock[0]);
                goto do_reset;
        }

        if (status & INTR_PCS_STATUS) {
                if (cas_pcs_interrupt(dev, cp, status))
                        goto do_reset;
        }

        if (status & INTR_TX_MAC_STATUS) {
                if (cas_txmac_interrupt(dev, cp, status))
                        goto do_reset;
        }

        if (status & INTR_RX_MAC_STATUS) {
                if (cas_rxmac_interrupt(dev, cp, status))
                        goto do_reset;
        }

        if (status & INTR_MAC_CTRL_STATUS) {
                if (cas_mac_interrupt(dev, cp, status))
                        goto do_reset;
        }

        if (status & INTR_MIF_STATUS) {
                if (cas_mif_interrupt(dev, cp, status))
                        goto do_reset;
        }

        if (status & INTR_PCI_ERROR_STATUS) {
                if (cas_pci_interrupt(dev, cp, status))
                        goto do_reset;
        }
        return 0;

do_reset:
#if 1
        atomic_inc(&cp->reset_task_pending);
        atomic_inc(&cp->reset_task_pending_all);
        printk(KERN_ERR "%s: reset called in cas_abnormal_irq [0x%x]\n",
               dev->name, status);
        schedule_work(&cp->reset_task);
#else
        atomic_set(&cp->reset_task_pending, CAS_RESET_ALL);
        printk(KERN_ERR "reset called in cas_abnormal_irq\n");
        schedule_work(&cp->reset_task);
#endif
        return 1;
}

/* NOTE: CAS_TABORT returns 1 or 2 so that it can be used when
 * determining whether to do a netif_stop/wakeup
 */
#define CAS_TABORT(x)      (((x)->cas_flags & CAS_FLAG_TARGET_ABORT) ? 2 : 1)
#define CAS_ROUND_PAGE(x)  (((x) + PAGE_SIZE - 1) & PAGE_MASK)
static inline int cas_calc_tabort(struct cas *cp, const unsigned long addr,
                                  const int len)
{
        unsigned long off = addr + len;

        if (CAS_TABORT(cp) == 1)
                return 0;
        if ((CAS_ROUND_PAGE(off) - off) > TX_TARGET_ABORT_LEN)
                return 0;
        return TX_TARGET_ABORT_LEN;
}
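
/* reclaim completed TX descriptors on one ring: unmap the buffers,
 * bump the stats, free the skbs, and wake the queue once enough
 * space frees up.
 */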
static inline void cas_tx_ringN(struct cas *cp, int ring, int limit)
{
        struct cas_tx_desc *txds;
        struct sk_buff **skbs;
        struct net_device *dev = cp->dev;
        int entry, count;

        spin_lock(&cp->tx_lock[ring]);
        txds = cp->init_txds[ring];
        skbs = cp->tx_skbs[ring];
        entry = cp->tx_old[ring];

        count = TX_BUFF_COUNT(ring, entry, limit);
        while (entry != limit) {
                struct sk_buff *skb = skbs[entry];
                dma_addr_t daddr;
                u32 dlen;
                int frag;

                if (!skb) {
                        /* this should never occur */
                        entry = TX_DESC_NEXT(ring, entry);
                        continue;
                }

                /* however, we might get only a partial skb release. */
                count -= skb_shinfo(skb)->nr_frags +
                         cp->tx_tiny_use[ring][entry].nbufs + 1;
                if (count < 0)
                        break;

                if (netif_msg_tx_done(cp))
                        printk(KERN_DEBUG "%s: tx[%d] done, slot %d\n",
                               cp->dev->name, ring, entry);

                skbs[entry] = NULL;
                cp->tx_tiny_use[ring][entry].nbufs = 0;

                for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
                        struct cas_tx_desc *txd = txds + entry;

                        daddr = le64_to_cpu(txd->buffer);
                        dlen = CAS_VAL(TX_DESC_BUFLEN,
                                       le64_to_cpu(txd->control));
                        pci_unmap_page(cp->pdev, daddr, dlen,
                                       PCI_DMA_TODEVICE);
                        entry = TX_DESC_NEXT(ring, entry);

                        /* tiny buffer may follow */
                        if (cp->tx_tiny_use[ring][entry].used) {
                                cp->tx_tiny_use[ring][entry].used = 0;
                                entry = TX_DESC_NEXT(ring, entry);
                        }
                }

                spin_lock(&cp->stat_lock[ring]);
                cp->net_stats[ring].tx_packets++;
                cp->net_stats[ring].tx_bytes += skb->len;
                spin_unlock(&cp->stat_lock[ring]);
                dev_kfree_skb_irq(skb);
        }
        cp->tx_old[ring] = entry;

        /* this is wrong for multiple tx rings. the net device needs
         * multiple queues for this to do the right thing.  we wait
         * for 2*packets to be available when using tiny buffers
         */
        if (netif_queue_stopped(dev) &&
            (TX_BUFFS_AVAIL(cp, ring) > CAS_TABORT(cp)*(MAX_SKB_FRAGS + 1)))
                netif_wake_queue(dev);
        spin_unlock(&cp->tx_lock[ring]);
}
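
/* TX completion entry point: read how far the hardware has advanced on
 * each ring (from the completion writeback area or the completion
 * registers) and reclaim up to that point.
 */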
static void cas_tx(struct net_device *dev, struct cas *cp,
                   u32 status)
{
        int limit, ring;
#ifdef USE_TX_COMPWB
        u64 compwb = le64_to_cpu(cp->init_block->tx_compwb);
#endif
        if (netif_msg_intr(cp))
                printk(KERN_DEBUG "%s: tx interrupt, status: 0x%x, %llx\n",
                       cp->dev->name, status, (unsigned long long)compwb);
        /* process all the rings */
        for (ring = 0; ring < N_TX_RINGS; ring++) {
#ifdef USE_TX_COMPWB
                /* use the completion writeback registers */
                limit = (CAS_VAL(TX_COMPWB_MSB, compwb) << 8) |
                        CAS_VAL(TX_COMPWB_LSB, compwb);
                compwb = TX_COMPWB_NEXT(compwb);
#else
                limit = readl(cp->regs + REG_TX_COMPN(ring));
#endif
                if (cp->tx_old[ring] != limit)
                        cas_tx_ringN(cp, ring, limit);
        }
}
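
/* build an skb from an RX completion entry: small packets get copied
 * outright, larger ones have RX_COPY_MIN header bytes copied and the
 * remaining data attached as page fragments.
 */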
1991 static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
1992 int entry, const u64 *words,
1993 struct sk_buff **skbref)
1995 int dlen, hlen, len, i, alloclen;
1996 int off, swivel = RX_SWIVEL_OFF_VAL;
1997 struct cas_page *page;
1998 struct sk_buff *skb;
1999 void *addr, *crcaddr;
2003 hlen = CAS_VAL(RX_COMP2_HDR_SIZE, words[1]);
2004 dlen = CAS_VAL(RX_COMP1_DATA_SIZE, words[0]);
2007 if (RX_COPY_ALWAYS || (words[2] & RX_COMP3_SMALL_PKT))
2010 alloclen = max(hlen, RX_COPY_MIN);
2012 skb = dev_alloc_skb(alloclen + swivel + cp->crc_size);
2017 skb_reserve(skb, swivel);
2020 addr = crcaddr = NULL;
2021 if (hlen) { /* always copy header pages */
2022 i = CAS_VAL(RX_COMP2_HDR_INDEX, words[1]);
2023 page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
2024 off = CAS_VAL(RX_COMP2_HDR_OFF, words[1]) * 0x100 +
2028 if (!dlen) /* attach FCS */
2030 pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr + off, i,
2031 PCI_DMA_FROMDEVICE);
2032 addr = cas_page_map(page->buffer);
2033 memcpy(p, addr + off, i);
2034 pci_dma_sync_single_for_device(cp->pdev, page->dma_addr + off, i,
2035 PCI_DMA_FROMDEVICE);
2036 cas_page_unmap(addr);
2037 RX_USED_ADD(page, 0x100);
2043 if (alloclen < (hlen + dlen)) {
2044 skb_frag_t *frag = skb_shinfo(skb)->frags;
2046 /* normal or jumbo packets. we use frags */
2047 i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]);
2048 page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
2049 off = CAS_VAL(RX_COMP1_DATA_OFF, words[0]) + swivel;
2051 hlen = min(cp->page_size - off, dlen);
2053 if (netif_msg_rx_err(cp)) {
2054 printk(KERN_DEBUG "%s: rx page overflow: "
2055 "%d\n", cp->dev->name, hlen);
2057 dev_kfree_skb_irq(skb);
2061 if (i == dlen) /* attach FCS */
2063 pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr + off, i,
2064 PCI_DMA_FROMDEVICE);
2066 /* make sure we always copy a header */
2068 if (p == (char *) skb->data) { /* not split */
2069 addr = cas_page_map(page->buffer);
2070 memcpy(p, addr + off, RX_COPY_MIN);
2071 pci_dma_sync_single_for_device(cp->pdev, page->dma_addr + off, i,
2072 PCI_DMA_FROMDEVICE);
2073 cas_page_unmap(addr);
2075 swivel = RX_COPY_MIN;
2076 RX_USED_ADD(page, cp->mtu_stride);
2078 RX_USED_ADD(page, hlen);
2080 skb_put(skb, alloclen);
2082 skb_shinfo(skb)->nr_frags++;
2083 skb->data_len += hlen - swivel;
2084 skb->truesize += hlen - swivel;
2085 skb->len += hlen - swivel;
2087 get_page(page->buffer);
2088 frag->page = page->buffer;
2089 frag->page_offset = off;
2090 frag->size = hlen - swivel;
2092 /* any more data? */
2093 if ((words[0] & RX_COMP1_SPLIT_PKT) && ((dlen -= hlen) > 0)) {
2097 i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]);
2098 page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
2099 pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr,
2100 hlen + cp->crc_size,
2101 PCI_DMA_FROMDEVICE);
2102 pci_dma_sync_single_for_device(cp->pdev, page->dma_addr,
2103 hlen + cp->crc_size,
2104 PCI_DMA_FROMDEVICE);
2106 skb_shinfo(skb)->nr_frags++;
2107 skb->data_len += hlen;
2111 get_page(page->buffer);
2112 frag->page = page->buffer;
2113 frag->page_offset = 0;
2115 RX_USED_ADD(page, hlen + cp->crc_size);
2119 addr = cas_page_map(page->buffer);
2120 crcaddr = addr + off + hlen;
2124 /* copying packet */
2128 i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]);
2129 page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
2130 off = CAS_VAL(RX_COMP1_DATA_OFF, words[0]) + swivel;
2131 hlen = min(cp->page_size - off, dlen);
2133 if (netif_msg_rx_err(cp)) {
2134 printk(KERN_DEBUG "%s: rx page overflow: "
2135 "%d\n", cp->dev->name, hlen);
2137 dev_kfree_skb_irq(skb);
2141 if (i == dlen) /* attach FCS */
2143 pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr + off, i,
2144 PCI_DMA_FROMDEVICE);
2145 addr = cas_page_map(page->buffer);
2146 memcpy(p, addr + off, i);
2147 pci_dma_sync_single_for_device(cp->pdev, page->dma_addr + off, i,
2148 PCI_DMA_FROMDEVICE);
2149 cas_page_unmap(addr);
2150 if (p == (char *) skb->data) /* not split */
2151 RX_USED_ADD(page, cp->mtu_stride);
2153 RX_USED_ADD(page, i);
2155 /* any more data? */
2156 if ((words[0] & RX_COMP1_SPLIT_PKT) && ((dlen -= hlen) > 0)) {
2158 i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]);
2159 page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
2160 pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr,
2161 dlen + cp->crc_size,
2162 PCI_DMA_FROMDEVICE);
2163 addr = cas_page_map(page->buffer);
2164 memcpy(p, addr, dlen + cp->crc_size);
2165 pci_dma_sync_single_for_device(cp->pdev, page->dma_addr,
2166 dlen + cp->crc_size,
2167 PCI_DMA_FROMDEVICE);
2168 cas_page_unmap(addr);
2169 RX_USED_ADD(page, dlen + cp->crc_size);
2174 crcaddr = skb->data + alloclen;
2176 skb_put(skb, alloclen);
2179 csum = (__force __sum16)htons(CAS_VAL(RX_COMP4_TCP_CSUM, words[3]));
2181 /* checksum includes FCS. strip it out. */
2182 csum = csum_fold(csum_partial(crcaddr, cp->crc_size,
2183 csum_unfold(csum)));
2185 cas_page_unmap(addr);
2187 skb->protocol = eth_type_trans(skb, cp->dev);
2188 if (skb->protocol == htons(ETH_P_IP)) {
2189 skb->csum = csum_unfold(~csum);
2190 skb->ip_summed = CHECKSUM_COMPLETE;
2192 skb->ip_summed = CHECKSUM_NONE;
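/* Editor's sketch (not part of the original driver): the header-copy +
 * page-fragment pattern that cas_rx_process_pkt implements above, boiled
 * down to its skeleton.  The ex_ name is hypothetical; page_address()
 * stands in for the cas_page_map()/cas_page_unmap() pair and assumes a
 * lowmem page.
 */
#if 0	/* illustrative only */
static struct sk_buff *ex_rx_build_skb(struct page *page, int off, int dlen)
{
	struct sk_buff *skb = dev_alloc_skb(RX_COPY_MIN + 2);
	skb_frag_t *frag;

	if (!skb)
		return NULL;
	skb_reserve(skb, 2);	/* align the IP header */

	/* 1) copy a small header so the stack can find it in skb->data */
	memcpy(skb_put(skb, RX_COPY_MIN), page_address(page) + off,
	       RX_COPY_MIN);

	/* 2) hang the rest of the page off the skb as a fragment */
	get_page(page);		/* the fragment holds its own reference */
	frag = &skb_shinfo(skb)->frags[0];
	frag->page        = page;
	frag->page_offset = off + RX_COPY_MIN;
	frag->size        = dlen - RX_COPY_MIN;
	skb_shinfo(skb)->nr_frags = 1;
	skb->data_len  = dlen - RX_COPY_MIN;
	skb->len      += dlen - RX_COPY_MIN;
	skb->truesize += dlen - RX_COPY_MIN;
	return skb;
}
#endif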
2197 /* we can handle up to 64 rx flows at a time. we do the same thing
2198 * as nonreassm except that we batch up the buffers.
2199 * NOTE: we currently just treat each flow as a bunch of packets that
2200 * we pass up. a better way would be to coalesce the packets
2201 * into a jumbo packet. to do that, we need to do the following:
 * 1) the first packet will have a clean split between header and
 *    data. save both.
2204 * 2) each time the next flow packet comes in, extend the
2205 * data length and merge the checksums.
2206 * 3) on flow release, fix up the header.
2207 * 4) make sure the higher layer doesn't care.
 * because packets get coalesced, we shouldn't run into fragment count
 * issues.
 */
2211 static inline void cas_rx_flow_pkt(struct cas *cp, const u64 *words,
2212 struct sk_buff *skb)
2214 int flowid = CAS_VAL(RX_COMP3_FLOWID, words[2]) & (N_RX_FLOWS - 1);
2215 struct sk_buff_head *flow = &cp->rx_flows[flowid];
2217 /* this is protected at a higher layer, so no need to
 * do any additional locking here. stick the buffer
 * at the end.
 */
2221 __skb_queue_tail(flow, skb);
2222 if (words[0] & RX_COMP1_RELEASE_FLOW) {
2223 while ((skb = __skb_dequeue(flow))) {
2224 cas_skb_release(skb);
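/* Editor's sketch (hypothetical, NOT implemented by this driver): step 2)
 * of the coalescing idea described in the comment above.  csum_block_add()
 * is the stock kernel helper for folding one partial checksum into another
 * at a byte offset; the page/frag splicing and the step 3) header fixup
 * are omitted.
 */
#if 0	/* illustrative only */
static void ex_flow_merge(struct sk_buff *head, const struct sk_buff *seg)
{
	/* extend the data length and merge the checksums */
	head->csum      = csum_block_add(head->csum, seg->csum, head->len);
	head->len      += seg->len;
	head->data_len += seg->len;
	head->truesize += seg->truesize;
}
#endif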
2229 /* put rx descriptor back on ring. if a buffer is in use by a higher
2230 * layer, this will need to put in a replacement.
2232 static void cas_post_page(struct cas *cp, const int ring, const int index)
2237 entry = cp->rx_old[ring];
2239 new = cas_page_swap(cp, ring, index);
2240 cp->init_rxds[ring][entry].buffer = cpu_to_le64(new->dma_addr);
2241 cp->init_rxds[ring][entry].index =
2242 cpu_to_le64(CAS_BASE(RX_INDEX_NUM, index) |
2243 CAS_BASE(RX_INDEX_RING, ring));
2245 entry = RX_DESC_ENTRY(ring, entry + 1);
2246 cp->rx_old[ring] = entry;
	if (ring == 0)
		writel(entry, cp->regs + REG_RX_KICK);
2253 else if ((N_RX_DESC_RINGS > 1) &&
2254 (cp->cas_flags & CAS_FLAG_REG_PLUS))
2255 writel(entry, cp->regs + REG_PLUS_RX_KICK1);
2259 /* only when things are bad */
2260 static int cas_post_rxds_ringN(struct cas *cp, int ring, int num)
2262 unsigned int entry, last, count, released;
2264 cas_page_t **page = cp->rx_pages[ring];
2266 entry = cp->rx_old[ring];
2268 if (netif_msg_intr(cp))
2269 printk(KERN_DEBUG "%s: rxd[%d] interrupt, done: %d\n",
2270 cp->dev->name, ring, entry);
2273 count = entry & 0x3;
	last = RX_DESC_ENTRY(ring, num ? entry + num - 4 : entry - 4);
2276 while (entry != last) {
2277 /* make a new buffer if it's still in use */
2278 if (page_count(page[entry]->buffer) > 1) {
2279 cas_page_t *new = cas_page_dequeue(cp);
			if (!new) {
				/* let the timer know that we need to
				 * do this again
				 */
2284 cp->cas_flags |= CAS_FLAG_RXD_POST(ring);
2285 if (!timer_pending(&cp->link_timer))
2286 mod_timer(&cp->link_timer, jiffies +
2287 CAS_LINK_FAST_TIMEOUT);
2288 cp->rx_old[ring] = entry;
				cp->rx_last[ring] = num ? num - released : 0;
				return -ENOMEM;
			}
2292 spin_lock(&cp->rx_inuse_lock);
2293 list_add(&page[entry]->list, &cp->rx_inuse_list);
2294 spin_unlock(&cp->rx_inuse_lock);
2295 cp->init_rxds[ring][entry].buffer =
2296 cpu_to_le64(new->dma_addr);
2306 entry = RX_DESC_ENTRY(ring, entry + 1);
2308 cp->rx_old[ring] = entry;
	if (ring == 0)
		writel(cluster, cp->regs + REG_RX_KICK);
2315 else if ((N_RX_DESC_RINGS > 1) &&
2316 (cp->cas_flags & CAS_FLAG_REG_PLUS))
2317 writel(cluster, cp->regs + REG_PLUS_RX_KICK1);
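/* Editor's note: the page_count() > 1 test above works because the RX
 * path takes an extra reference (get_page()) whenever it attaches a page
 * to an skb as a fragment.  The driver's own reference keeps an idle page
 * at a count of 1; anything higher means some skb upstream still points
 * into the page, so it cannot be recycled to the hardware and the
 * replacement has to come from the spare pool instead.
 */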
2322 /* process a completion ring. packets are set up in three basic ways:
2323 * small packets: should be copied header + data in single buffer.
2324 * large packets: header and data in a single buffer.
2325 * split packets: header in a separate buffer from data.
2326 * data may be in multiple pages. data may be > 256
2327 * bytes but in a single page.
2329 * NOTE: RX page posting is done in this routine as well. while there's
2330 * the capability of using multiple RX completion rings, it isn't
2331 * really worthwhile due to the fact that the page posting will
2332 * force serialization on the single descriptor ring.
2334 static int cas_rx_ringN(struct cas *cp, int ring, int budget)
2336 struct cas_rx_comp *rxcs = cp->init_rxcs[ring];
2340 if (netif_msg_intr(cp))
2341 printk(KERN_DEBUG "%s: rx[%d] interrupt, done: %d/%d\n",
2342 cp->dev->name, ring,
		       readl(cp->regs + REG_RX_COMP_HEAD),
		       cp->rx_new[ring]);
2346 entry = cp->rx_new[ring];
2349 struct cas_rx_comp *rxc = rxcs + entry;
2350 struct sk_buff *uninitialized_var(skb);
2355 words[0] = le64_to_cpu(rxc->word1);
2356 words[1] = le64_to_cpu(rxc->word2);
2357 words[2] = le64_to_cpu(rxc->word3);
2358 words[3] = le64_to_cpu(rxc->word4);
2360 /* don't touch if still owned by hw */
2361 type = CAS_VAL(RX_COMP1_TYPE, words[0]);
2365 /* hw hasn't cleared the zero bit yet */
2366 if (words[3] & RX_COMP4_ZERO) {
2370 /* get info on the packet */
2371 if (words[3] & (RX_COMP4_LEN_MISMATCH | RX_COMP4_BAD)) {
2372 spin_lock(&cp->stat_lock[ring]);
2373 cp->net_stats[ring].rx_errors++;
2374 if (words[3] & RX_COMP4_LEN_MISMATCH)
2375 cp->net_stats[ring].rx_length_errors++;
2376 if (words[3] & RX_COMP4_BAD)
2377 cp->net_stats[ring].rx_crc_errors++;
2378 spin_unlock(&cp->stat_lock[ring]);
2380 /* We'll just return it to Cassini. */
2382 spin_lock(&cp->stat_lock[ring]);
2383 ++cp->net_stats[ring].rx_dropped;
2384 spin_unlock(&cp->stat_lock[ring]);
2388 len = cas_rx_process_pkt(cp, rxc, entry, words, &skb);
2394 /* see if it's a flow re-assembly or not. the driver
2395 * itself handles release back up.
2397 if (RX_DONT_BATCH || (type == 0x2)) {
2398 /* non-reassm: these always get released */
2399 cas_skb_release(skb);
2401 cas_rx_flow_pkt(cp, words, skb);
2404 spin_lock(&cp->stat_lock[ring]);
2405 cp->net_stats[ring].rx_packets++;
2406 cp->net_stats[ring].rx_bytes += len;
2407 spin_unlock(&cp->stat_lock[ring]);
2412 /* should it be released? */
2413 if (words[0] & RX_COMP1_RELEASE_HDR) {
2414 i = CAS_VAL(RX_COMP2_HDR_INDEX, words[1]);
2415 dring = CAS_VAL(RX_INDEX_RING, i);
2416 i = CAS_VAL(RX_INDEX_NUM, i);
2417 cas_post_page(cp, dring, i);
2420 if (words[0] & RX_COMP1_RELEASE_DATA) {
2421 i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]);
2422 dring = CAS_VAL(RX_INDEX_RING, i);
2423 i = CAS_VAL(RX_INDEX_NUM, i);
2424 cas_post_page(cp, dring, i);
2427 if (words[0] & RX_COMP1_RELEASE_NEXT) {
2428 i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]);
2429 dring = CAS_VAL(RX_INDEX_RING, i);
2430 i = CAS_VAL(RX_INDEX_NUM, i);
2431 cas_post_page(cp, dring, i);
2434 /* skip to the next entry */
2435 entry = RX_COMP_ENTRY(ring, entry + 1 +
2436 CAS_VAL(RX_COMP1_SKIP, words[0]));
2438 if (budget && (npackets >= budget))
2442 cp->rx_new[ring] = entry;
		printk(KERN_INFO "%s: Memory squeeze, deferring packet.\n",
		       cp->dev->name);
2451 /* put completion entries back on the ring */
2452 static void cas_post_rxcs_ringN(struct net_device *dev,
2453 struct cas *cp, int ring)
2455 struct cas_rx_comp *rxc = cp->init_rxcs[ring];
2458 last = cp->rx_cur[ring];
2459 entry = cp->rx_new[ring];
2460 if (netif_msg_intr(cp))
2461 printk(KERN_DEBUG "%s: rxc[%d] interrupt, done: %d/%d\n",
		       dev->name, ring, readl(cp->regs + REG_RX_COMP_HEAD),
		       entry);
2465 /* zero and re-mark descriptors */
2466 while (last != entry) {
2467 cas_rxc_init(rxc + last);
2468 last = RX_COMP_ENTRY(ring, last + 1);
2470 cp->rx_cur[ring] = last;
2473 writel(last, cp->regs + REG_RX_COMP_TAIL);
2474 else if (cp->cas_flags & CAS_FLAG_REG_PLUS)
2475 writel(last, cp->regs + REG_PLUS_RX_COMPN_TAIL(ring));
2480 /* cassini can use all four PCI interrupts for the completion ring.
2481 * rings 3 and 4 are identical
2483 #if defined(USE_PCI_INTC) || defined(USE_PCI_INTD)
2484 static inline void cas_handle_irqN(struct net_device *dev,
2485 struct cas *cp, const u32 status,
2488 if (status & (INTR_RX_COMP_FULL_ALT | INTR_RX_COMP_AF_ALT))
2489 cas_post_rxcs_ringN(dev, cp, ring);
2492 static irqreturn_t cas_interruptN(int irq, void *dev_id)
2494 struct net_device *dev = dev_id;
2495 struct cas *cp = netdev_priv(dev);
2496 unsigned long flags;
2498 u32 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(ring));
	/* check for shared irq */
	if (status == 0)
		return IRQ_NONE;
2504 ring = (irq == cp->pci_irq_INTC) ? 2 : 3;
2505 spin_lock_irqsave(&cp->lock, flags);
	if (status & INTR_RX_DONE_ALT) { /* handle rx separately */
#ifdef USE_NAPI
		cas_mask_intr(cp);
		napi_schedule(&cp->napi);
#else
		cas_rx_ringN(cp, ring, 0);
#endif
		status &= ~INTR_RX_DONE_ALT;
	}

	if (status)
		cas_handle_irqN(dev, cp, status, ring);
2518 spin_unlock_irqrestore(&cp->lock, flags);
2524 /* everything but rx packets */
2525 static inline void cas_handle_irq1(struct cas *cp, const u32 status)
2527 if (status & INTR_RX_BUF_UNAVAIL_1) {
2528 /* Frame arrived, no free RX buffers available.
2529 * NOTE: we can get this on a link transition. */
2530 cas_post_rxds_ringN(cp, 1, 0);
2531 spin_lock(&cp->stat_lock[1]);
2532 cp->net_stats[1].rx_dropped++;
2533 spin_unlock(&cp->stat_lock[1]);
2536 if (status & INTR_RX_BUF_AE_1)
2537 cas_post_rxds_ringN(cp, 1, RX_DESC_RINGN_SIZE(1) -
2538 RX_AE_FREEN_VAL(1));
2540 if (status & (INTR_RX_COMP_AF | INTR_RX_COMP_FULL))
2541 cas_post_rxcs_ringN(cp, 1);
2544 /* ring 2 handles a few more events than 3 and 4 */
2545 static irqreturn_t cas_interrupt1(int irq, void *dev_id)
2547 struct net_device *dev = dev_id;
2548 struct cas *cp = netdev_priv(dev);
2549 unsigned long flags;
2550 u32 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(1));
	/* check for shared interrupt */
	if (status == 0)
		return IRQ_NONE;
2556 spin_lock_irqsave(&cp->lock, flags);
	if (status & INTR_RX_DONE_ALT) { /* handle rx separately */
#ifdef USE_NAPI
		cas_mask_intr(cp);
		napi_schedule(&cp->napi);
#else
		cas_rx_ringN(cp, 1, 0);
#endif
		status &= ~INTR_RX_DONE_ALT;
	}
	if (status)
		cas_handle_irq1(cp, status);
2568 spin_unlock_irqrestore(&cp->lock, flags);
2573 static inline void cas_handle_irq(struct net_device *dev,
2574 struct cas *cp, const u32 status)
2576 /* housekeeping interrupts */
2577 if (status & INTR_ERROR_MASK)
2578 cas_abnormal_irq(dev, cp, status);
2580 if (status & INTR_RX_BUF_UNAVAIL) {
2581 /* Frame arrived, no free RX buffers available.
2582 * NOTE: we can get this on a link transition.
2584 cas_post_rxds_ringN(cp, 0, 0);
2585 spin_lock(&cp->stat_lock[0]);
2586 cp->net_stats[0].rx_dropped++;
2587 spin_unlock(&cp->stat_lock[0]);
2588 } else if (status & INTR_RX_BUF_AE) {
2589 cas_post_rxds_ringN(cp, 0, RX_DESC_RINGN_SIZE(0) -
2590 RX_AE_FREEN_VAL(0));
2593 if (status & (INTR_RX_COMP_AF | INTR_RX_COMP_FULL))
2594 cas_post_rxcs_ringN(dev, cp, 0);
2597 static irqreturn_t cas_interrupt(int irq, void *dev_id)
2599 struct net_device *dev = dev_id;
2600 struct cas *cp = netdev_priv(dev);
2601 unsigned long flags;
	u32 status = readl(cp->regs + REG_INTR_STATUS);

	if (status == 0)
		return IRQ_NONE;
2607 spin_lock_irqsave(&cp->lock, flags);
2608 if (status & (INTR_TX_ALL | INTR_TX_INTME)) {
2609 cas_tx(dev, cp, status);
2610 status &= ~(INTR_TX_ALL | INTR_TX_INTME);
	if (status & INTR_RX_DONE) {
#ifdef USE_NAPI
		cas_mask_intr(cp);
		napi_schedule(&cp->napi);
#else
		cas_rx_ringN(cp, 0, 0);
#endif
		status &= ~INTR_RX_DONE;
	}

	if (status)
		cas_handle_irq(dev, cp, status);
2625 spin_unlock_irqrestore(&cp->lock, flags);
2631 static int cas_poll(struct napi_struct *napi, int budget)
2633 struct cas *cp = container_of(napi, struct cas, napi);
2634 struct net_device *dev = cp->dev;
2635 int i, enable_intr, credits;
2636 u32 status = readl(cp->regs + REG_INTR_STATUS);
2637 unsigned long flags;
2639 spin_lock_irqsave(&cp->lock, flags);
2640 cas_tx(dev, cp, status);
2641 spin_unlock_irqrestore(&cp->lock, flags);
	/* NAPI rx packets. we spread the credits across all of the
	 * rx completion rings.
	 *
	 * to make sure we're fair with the work we loop through each
	 * ring N_RX_COMP_RINGS times with a request of
	 * budget / N_RX_COMP_RINGS
	 */
2652 for (i = 0; i < N_RX_COMP_RINGS; i++) {
		int j;
		for (j = 0; j < N_RX_COMP_RINGS; j++) {
2655 credits += cas_rx_ringN(cp, j, budget / N_RX_COMP_RINGS);
2656 if (credits >= budget) {
2664 /* final rx completion */
2665 spin_lock_irqsave(&cp->lock, flags);
	if (status)
		cas_handle_irq(dev, cp, status);
2670 if (N_RX_COMP_RINGS > 1) {
2671 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(1));
		if (status)
			cas_handle_irq1(cp, status);
2678 if (N_RX_COMP_RINGS > 2) {
2679 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(2));
		if (status)
			cas_handle_irqN(dev, cp, status, 2);
2686 if (N_RX_COMP_RINGS > 3) {
2687 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(3));
		if (status)
			cas_handle_irqN(dev, cp, status, 3);
2692 spin_unlock_irqrestore(&cp->lock, flags);
2694 napi_complete(napi);
2695 cas_unmask_intr(cp);
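/* Editor's note: a worked example of the budget split in cas_poll above.
 * With budget = 64 and N_RX_COMP_RINGS = 4, each cas_rx_ringN() call may
 * reap up to 64 / 4 = 16 completions.  Because the ring loop itself runs
 * N_RX_COMP_RINGS times, a busy ring is revisited and can soak up credit
 * an idle ring did not use, while the credits >= budget check still caps
 * the total work at the budget.
 */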
2701 #ifdef CONFIG_NET_POLL_CONTROLLER
2702 static void cas_netpoll(struct net_device *dev)
2704 struct cas *cp = netdev_priv(dev);
2706 cas_disable_irq(cp, 0);
2707 cas_interrupt(cp->pdev->irq, dev);
2708 cas_enable_irq(cp, 0);
2711 if (N_RX_COMP_RINGS > 1) {
2712 /* cas_interrupt1(); */
2716 if (N_RX_COMP_RINGS > 2) {
2717 /* cas_interruptN(); */
2721 if (N_RX_COMP_RINGS > 3) {
2722 /* cas_interruptN(); */
2728 static void cas_tx_timeout(struct net_device *dev)
2730 struct cas *cp = netdev_priv(dev);
2732 printk(KERN_ERR "%s: transmit timed out, resetting\n", dev->name);
2733 if (!cp->hw_running) {
		printk(KERN_ERR "%s: hrm.. hw not running!\n", dev->name);
2738 printk(KERN_ERR "%s: MIF_STATE[%08x]\n",
2739 dev->name, readl(cp->regs + REG_MIF_STATE_MACHINE));
2741 printk(KERN_ERR "%s: MAC_STATE[%08x]\n",
2742 dev->name, readl(cp->regs + REG_MAC_STATE_MACHINE));
2744 printk(KERN_ERR "%s: TX_STATE[%08x:%08x:%08x] "
2745 "FIFO[%08x:%08x:%08x] SM1[%08x] SM2[%08x]\n",
	       dev->name,
	       readl(cp->regs + REG_TX_CFG),
2748 readl(cp->regs + REG_MAC_TX_STATUS),
2749 readl(cp->regs + REG_MAC_TX_CFG),
2750 readl(cp->regs + REG_TX_FIFO_PKT_CNT),
2751 readl(cp->regs + REG_TX_FIFO_WRITE_PTR),
2752 readl(cp->regs + REG_TX_FIFO_READ_PTR),
2753 readl(cp->regs + REG_TX_SM_1),
2754 readl(cp->regs + REG_TX_SM_2));
2756 printk(KERN_ERR "%s: RX_STATE[%08x:%08x:%08x]\n",
	       dev->name,
	       readl(cp->regs + REG_RX_CFG),
2759 readl(cp->regs + REG_MAC_RX_STATUS),
2760 readl(cp->regs + REG_MAC_RX_CFG));
2762 printk(KERN_ERR "%s: HP_STATE[%08x:%08x:%08x:%08x]\n",
	       dev->name,
	       readl(cp->regs + REG_HP_STATE_MACHINE),
2765 readl(cp->regs + REG_HP_STATUS0),
2766 readl(cp->regs + REG_HP_STATUS1),
2767 readl(cp->regs + REG_HP_STATUS2));
#if 1
	atomic_inc(&cp->reset_task_pending);
	atomic_inc(&cp->reset_task_pending_all);
	schedule_work(&cp->reset_task);
#else
	atomic_set(&cp->reset_task_pending, CAS_RESET_ALL);
	schedule_work(&cp->reset_task);
#endif
2779 static inline int cas_intme(int ring, int entry)
2781 /* Algorithm: IRQ every 1/2 of descriptors. */
2782 if (!(entry & ((TX_DESC_RINGN_SIZE(ring) >> 1) - 1)))
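	/* Editor's note: with TX_DESC_RINGN_SIZE(ring) == 128, say, the
	 * test above reduces to !(entry & 63), so INTME is requested on
	 * entries 0 and 64 -- one interrupt per half ring, matching the
	 * stated algorithm.
	 */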
2788 static void cas_write_txd(struct cas *cp, int ring, int entry,
2789 dma_addr_t mapping, int len, u64 ctrl, int last)
2791 struct cas_tx_desc *txd = cp->init_txds[ring] + entry;
2793 ctrl |= CAS_BASE(TX_DESC_BUFLEN, len);
2794 if (cas_intme(ring, entry))
2795 ctrl |= TX_DESC_INTME;
2797 ctrl |= TX_DESC_EOF;
2798 txd->control = cpu_to_le64(ctrl);
2799 txd->buffer = cpu_to_le64(mapping);
2802 static inline void *tx_tiny_buf(struct cas *cp, const int ring,
2805 return cp->tx_tiny_bufs[ring] + TX_TINY_BUF_LEN*entry;
2808 static inline dma_addr_t tx_tiny_map(struct cas *cp, const int ring,
2809 const int entry, const int tentry)
2811 cp->tx_tiny_use[ring][tentry].nbufs++;
2812 cp->tx_tiny_use[ring][entry].used = 1;
2813 return cp->tx_tiny_dvma[ring] + TX_TINY_BUF_LEN*entry;
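/* Editor's note: the tx_tiny_* helpers above back the bounce scheme used
 * to dodge the target-abort erratum on older chips (see the CAS_TABORT
 * and cas_calc_tabort uses below): when the tail of a buffer would trip
 * the bug, only len - tabort bytes are sent from the original mapping and
 * the last tabort bytes are copied into a preallocated DMA-consistent
 * "tiny" buffer that gets a descriptor of its own.  tx_tiny_use[] records
 * which slots are in play so cas_clean_txd() knows not to unmap them as
 * page mappings.
 */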
2816 static inline int cas_xmit_tx_ringN(struct cas *cp, int ring,
2817 struct sk_buff *skb)
2819 struct net_device *dev = cp->dev;
2820 int entry, nr_frags, frag, tabort, tentry;
2822 unsigned long flags;
2826 spin_lock_irqsave(&cp->tx_lock[ring], flags);
2828 /* This is a hard error, log it. */
2829 if (TX_BUFFS_AVAIL(cp, ring) <=
2830 CAS_TABORT(cp)*(skb_shinfo(skb)->nr_frags + 1)) {
2831 netif_stop_queue(dev);
2832 spin_unlock_irqrestore(&cp->tx_lock[ring], flags);
2833 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
2834 "queue awake!\n", dev->name);
2839 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2840 const u64 csum_start_off = skb_transport_offset(skb);
2841 const u64 csum_stuff_off = csum_start_off + skb->csum_offset;
2843 ctrl = TX_DESC_CSUM_EN |
2844 CAS_BASE(TX_DESC_CSUM_START, csum_start_off) |
2845 CAS_BASE(TX_DESC_CSUM_STUFF, csum_stuff_off);
2848 entry = cp->tx_new[ring];
2849 cp->tx_skbs[ring][entry] = skb;
2851 nr_frags = skb_shinfo(skb)->nr_frags;
2852 len = skb_headlen(skb);
2853 mapping = pci_map_page(cp->pdev, virt_to_page(skb->data),
2854 offset_in_page(skb->data), len,
2858 tabort = cas_calc_tabort(cp, (unsigned long) skb->data, len);
2859 if (unlikely(tabort)) {
2860 /* NOTE: len is always > tabort */
2861 cas_write_txd(cp, ring, entry, mapping, len - tabort,
2862 ctrl | TX_DESC_SOF, 0);
2863 entry = TX_DESC_NEXT(ring, entry);
2865 skb_copy_from_linear_data_offset(skb, len - tabort,
2866 tx_tiny_buf(cp, ring, entry), tabort);
2867 mapping = tx_tiny_map(cp, ring, entry, tentry);
2868 cas_write_txd(cp, ring, entry, mapping, tabort, ctrl,
2871 cas_write_txd(cp, ring, entry, mapping, len, ctrl |
2872 TX_DESC_SOF, (nr_frags == 0));
2874 entry = TX_DESC_NEXT(ring, entry);
2876 for (frag = 0; frag < nr_frags; frag++) {
2877 skb_frag_t *fragp = &skb_shinfo(skb)->frags[frag];
2880 mapping = pci_map_page(cp->pdev, fragp->page,
2881 fragp->page_offset, len,
2884 tabort = cas_calc_tabort(cp, fragp->page_offset, len);
2885 if (unlikely(tabort)) {
2888 /* NOTE: len is always > tabort */
2889 cas_write_txd(cp, ring, entry, mapping, len - tabort,
2891 entry = TX_DESC_NEXT(ring, entry);
2893 addr = cas_page_map(fragp->page);
2894 memcpy(tx_tiny_buf(cp, ring, entry),
2895 addr + fragp->page_offset + len - tabort,
2897 cas_page_unmap(addr);
2898 mapping = tx_tiny_map(cp, ring, entry, tentry);
2902 cas_write_txd(cp, ring, entry, mapping, len, ctrl,
2903 (frag + 1 == nr_frags));
2904 entry = TX_DESC_NEXT(ring, entry);
2907 cp->tx_new[ring] = entry;
2908 if (TX_BUFFS_AVAIL(cp, ring) <= CAS_TABORT(cp)*(MAX_SKB_FRAGS + 1))
2909 netif_stop_queue(dev);
2911 if (netif_msg_tx_queued(cp))
2912 printk(KERN_DEBUG "%s: tx[%d] queued, slot %d, skblen %d, "
		       "avail %d\n",
		       dev->name, ring, entry, skb->len,
2915 TX_BUFFS_AVAIL(cp, ring));
2916 writel(entry, cp->regs + REG_TX_KICKN(ring));
2917 spin_unlock_irqrestore(&cp->tx_lock[ring], flags);
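/* Editor's sketch (hypothetical helper, not part of the driver): the
 * minimal descriptor sequence cas_xmit_tx_ringN produces for a packet
 * with one page fragment -- SOF on the first buffer, EOF (via the "last"
 * flag of cas_write_txd) on the final one, then a single kick with the
 * next free entry.
 */
#if 0	/* illustrative only */
static void ex_xmit_two_buffers(struct cas *cp, int ring, int entry,
				dma_addr_t head, int head_len,
				dma_addr_t frag, int frag_len, u64 ctrl)
{
	cas_write_txd(cp, ring, entry, head, head_len,
		      ctrl | TX_DESC_SOF, 0);		/* first buffer */
	entry = TX_DESC_NEXT(ring, entry);
	cas_write_txd(cp, ring, entry, frag, frag_len,
		      ctrl, 1);				/* last -> EOF */
	entry = TX_DESC_NEXT(ring, entry);
	writel(entry, cp->regs + REG_TX_KICKN(ring));
}
#endif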
2921 static int cas_start_xmit(struct sk_buff *skb, struct net_device *dev)
2923 struct cas *cp = netdev_priv(dev);
2925 /* this is only used as a load-balancing hint, so it doesn't
	 * need to be SMP safe
	 */
	static int ring;
	if (skb_padto(skb, cp->min_frame_size))
		return NETDEV_TX_OK;
2933 /* XXX: we need some higher-level QoS hooks to steer packets to
2934 * individual queues.
2936 if (cas_xmit_tx_ringN(cp, ring++ & N_TX_RINGS_MASK, skb))
2937 return NETDEV_TX_BUSY;
2938 dev->trans_start = jiffies;
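/* Editor's note: the ring selection above is plain round-robin -- with
 * N_TX_RINGS == 4 (mask 3) successive packets land on rings 0, 1, 2, 3,
 * 0, ...  A racy read of the unlocked static counter only skews the
 * balance; the mask guarantees the index can never go out of range.
 */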
2942 static void cas_init_tx_dma(struct cas *cp)
2944 u64 desc_dma = cp->block_dvma;
2949 /* set up tx completion writeback registers. must be 8-byte aligned */
2950 #ifdef USE_TX_COMPWB
2951 off = offsetof(struct cas_init_block, tx_compwb);
2952 writel((desc_dma + off) >> 32, cp->regs + REG_TX_COMPWB_DB_HI);
2953 writel((desc_dma + off) & 0xffffffff, cp->regs + REG_TX_COMPWB_DB_LOW);
2956 /* enable completion writebacks, enable paced mode,
2957 * disable read pipe, and disable pre-interrupt compwbs
2959 val = TX_CFG_COMPWB_Q1 | TX_CFG_COMPWB_Q2 |
2960 TX_CFG_COMPWB_Q3 | TX_CFG_COMPWB_Q4 |
2961 TX_CFG_DMA_RDPIPE_DIS | TX_CFG_PACED_MODE |
2962 TX_CFG_INTR_COMPWB_DIS;
2964 /* write out tx ring info and tx desc bases */
2965 for (i = 0; i < MAX_TX_RINGS; i++) {
2966 off = (unsigned long) cp->init_txds[i] -
2967 (unsigned long) cp->init_block;
2969 val |= CAS_TX_RINGN_BASE(i);
2970 writel((desc_dma + off) >> 32, cp->regs + REG_TX_DBN_HI(i));
2971 writel((desc_dma + off) & 0xffffffff, cp->regs +
		/* don't zero out the kick register here as the system
		 * will wedge
		 */
2977 writel(val, cp->regs + REG_TX_CFG);
	/* program max burst sizes. these numbers should be different
	 * if doing QoS.
	 */
2983 writel(0x800, cp->regs + REG_TX_MAXBURST_0);
2984 writel(0x1600, cp->regs + REG_TX_MAXBURST_1);
2985 writel(0x2400, cp->regs + REG_TX_MAXBURST_2);
2986 writel(0x4800, cp->regs + REG_TX_MAXBURST_3);
2988 writel(0x800, cp->regs + REG_TX_MAXBURST_0);
2989 writel(0x800, cp->regs + REG_TX_MAXBURST_1);
2990 writel(0x800, cp->regs + REG_TX_MAXBURST_2);
2991 writel(0x800, cp->regs + REG_TX_MAXBURST_3);
2995 /* Must be invoked under cp->lock. */
2996 static inline void cas_init_dma(struct cas *cp)
2998 cas_init_tx_dma(cp);
2999 cas_init_rx_dma(cp);
3002 /* Must be invoked under cp->lock. */
3003 static u32 cas_setup_multicast(struct cas *cp)
3008 if (cp->dev->flags & IFF_PROMISC) {
3009 rxcfg |= MAC_RX_CFG_PROMISC_EN;
3011 } else if (cp->dev->flags & IFF_ALLMULTI) {
		for (i = 0; i < 16; i++)
3013 writel(0xFFFF, cp->regs + REG_MAC_HASH_TABLEN(i));
3014 rxcfg |= MAC_RX_CFG_HASH_FILTER_EN;
3019 struct dev_mc_list *dmi = cp->dev->mc_list;
3022 /* use the alternate mac address registers for the
3023 * first 15 multicast addresses
3025 for (i = 1; i <= CAS_MC_EXACT_MATCH_SIZE; i++) {
3027 writel(0x0, cp->regs + REG_MAC_ADDRN(i*3 + 0));
3028 writel(0x0, cp->regs + REG_MAC_ADDRN(i*3 + 1));
3029 writel(0x0, cp->regs + REG_MAC_ADDRN(i*3 + 2));
3032 writel((dmi->dmi_addr[4] << 8) | dmi->dmi_addr[5],
3033 cp->regs + REG_MAC_ADDRN(i*3 + 0));
3034 writel((dmi->dmi_addr[2] << 8) | dmi->dmi_addr[3],
3035 cp->regs + REG_MAC_ADDRN(i*3 + 1));
3036 writel((dmi->dmi_addr[0] << 8) | dmi->dmi_addr[1],
3037 cp->regs + REG_MAC_ADDRN(i*3 + 2));
3041 /* use hw hash table for the next series of
3042 * multicast addresses
3044 memset(hash_table, 0, sizeof(hash_table));
			crc = ether_crc_le(ETH_ALEN, dmi->dmi_addr);
			crc >>= 24;	/* index with the upper 8 bits */
			hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf));
		for (i = 0; i < 16; i++)
3052 writel(hash_table[i], cp->regs +
3053 REG_MAC_HASH_TABLEN(i));
3054 rxcfg |= MAC_RX_CFG_HASH_FILTER_EN;
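		/* Editor's note: a worked example of the hash above.  If
		 * the top byte of the little-endian CRC is 0xa7, then
		 * after crc >>= 24 we have crc >> 4 == 0x0a and
		 * crc & 0xf == 7, so bit (15 - 7) == 8 is set in
		 * hash_table[10] -- one bit of the 256-bit filter spread
		 * across the sixteen 16-bit REG_MAC_HASH_TABLEN registers.
		 */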
3060 /* must be invoked under cp->stat_lock[N_TX_RINGS] */
3061 static void cas_clear_mac_err(struct cas *cp)
3063 writel(0, cp->regs + REG_MAC_COLL_NORMAL);
3064 writel(0, cp->regs + REG_MAC_COLL_FIRST);
3065 writel(0, cp->regs + REG_MAC_COLL_EXCESS);
3066 writel(0, cp->regs + REG_MAC_COLL_LATE);
3067 writel(0, cp->regs + REG_MAC_TIMER_DEFER);
3068 writel(0, cp->regs + REG_MAC_ATTEMPTS_PEAK);
3069 writel(0, cp->regs + REG_MAC_RECV_FRAME);
3070 writel(0, cp->regs + REG_MAC_LEN_ERR);
3071 writel(0, cp->regs + REG_MAC_ALIGN_ERR);
3072 writel(0, cp->regs + REG_MAC_FCS_ERR);
3073 writel(0, cp->regs + REG_MAC_RX_CODE_ERR);
3077 static void cas_mac_reset(struct cas *cp)
3081 /* do both TX and RX reset */
3082 writel(0x1, cp->regs + REG_MAC_TX_RESET);
3083 writel(0x1, cp->regs + REG_MAC_RX_RESET);
3088 if (readl(cp->regs + REG_MAC_TX_RESET) == 0)
3096 if (readl(cp->regs + REG_MAC_RX_RESET) == 0)
3101 if (readl(cp->regs + REG_MAC_TX_RESET) |
3102 readl(cp->regs + REG_MAC_RX_RESET))
3103 printk(KERN_ERR "%s: mac tx[%d]/rx[%d] reset failed [%08x]\n",
3104 cp->dev->name, readl(cp->regs + REG_MAC_TX_RESET),
3105 readl(cp->regs + REG_MAC_RX_RESET),
3106 readl(cp->regs + REG_MAC_STATE_MACHINE));
3110 /* Must be invoked under cp->lock. */
3111 static void cas_init_mac(struct cas *cp)
3113 unsigned char *e = &cp->dev->dev_addr[0];
3115 #ifdef CONFIG_CASSINI_MULTICAST_REG_WRITE
3120 /* setup core arbitration weight register */
3121 writel(CAWR_RR_DIS, cp->regs + REG_CAWR);
3123 /* XXX Use pci_dma_burst_advice() */
3124 #if !defined(CONFIG_SPARC64) && !defined(CONFIG_ALPHA)
	/* set the infinite burst register for chips that don't have
	 * pci issues.
	 */
3128 if ((cp->cas_flags & CAS_FLAG_TARGET_ABORT) == 0)
3129 writel(INF_BURST_EN, cp->regs + REG_INF_BURST);
3132 writel(0x1BF0, cp->regs + REG_MAC_SEND_PAUSE);
3134 writel(0x00, cp->regs + REG_MAC_IPG0);
3135 writel(0x08, cp->regs + REG_MAC_IPG1);
3136 writel(0x04, cp->regs + REG_MAC_IPG2);
3138 /* change later for 802.3z */
3139 writel(0x40, cp->regs + REG_MAC_SLOT_TIME);
3141 /* min frame + FCS */
3142 writel(ETH_ZLEN + 4, cp->regs + REG_MAC_FRAMESIZE_MIN);
3144 /* Ethernet payload + header + FCS + optional VLAN tag. NOTE: we
	 * specify the maximum frame size to prevent RX tag errors on
	 * big frames.
	 */
3148 writel(CAS_BASE(MAC_FRAMESIZE_MAX_BURST, 0x2000) |
3149 CAS_BASE(MAC_FRAMESIZE_MAX_FRAME,
3150 (CAS_MAX_MTU + ETH_HLEN + 4 + 4)),
3151 cp->regs + REG_MAC_FRAMESIZE_MAX);
3153 /* NOTE: crc_size is used as a surrogate for half-duplex.
	 * workaround saturn half-duplex issue by increasing preamble
	 * size to 65 bytes.
	 */
3157 if ((cp->cas_flags & CAS_FLAG_SATURN) && cp->crc_size)
3158 writel(0x41, cp->regs + REG_MAC_PA_SIZE);
3160 writel(0x07, cp->regs + REG_MAC_PA_SIZE);
3161 writel(0x04, cp->regs + REG_MAC_JAM_SIZE);
3162 writel(0x10, cp->regs + REG_MAC_ATTEMPT_LIMIT);
3163 writel(0x8808, cp->regs + REG_MAC_CTRL_TYPE);
3165 writel((e[5] | (e[4] << 8)) & 0x3ff, cp->regs + REG_MAC_RANDOM_SEED);
3167 writel(0, cp->regs + REG_MAC_ADDR_FILTER0);
3168 writel(0, cp->regs + REG_MAC_ADDR_FILTER1);
3169 writel(0, cp->regs + REG_MAC_ADDR_FILTER2);
3170 writel(0, cp->regs + REG_MAC_ADDR_FILTER2_1_MASK);
3171 writel(0, cp->regs + REG_MAC_ADDR_FILTER0_MASK);
3173 /* setup mac address in perfect filter array */
3174 for (i = 0; i < 45; i++)
3175 writel(0x0, cp->regs + REG_MAC_ADDRN(i));
3177 writel((e[4] << 8) | e[5], cp->regs + REG_MAC_ADDRN(0));
3178 writel((e[2] << 8) | e[3], cp->regs + REG_MAC_ADDRN(1));
3179 writel((e[0] << 8) | e[1], cp->regs + REG_MAC_ADDRN(2));
3181 writel(0x0001, cp->regs + REG_MAC_ADDRN(42));
3182 writel(0xc200, cp->regs + REG_MAC_ADDRN(43));
3183 writel(0x0180, cp->regs + REG_MAC_ADDRN(44));
3185 #ifndef CONFIG_CASSINI_MULTICAST_REG_WRITE
3186 cp->mac_rx_cfg = cas_setup_multicast(cp);
3188 /* WTZ: Do what Adrian did in cas_set_multicast. Doing
3189 * a writel does not seem to be necessary because Cassini
3190 * seems to preserve the configuration when we do the reset.
3191 * If the chip is in trouble, though, it is not clear if we
3192 * can really count on this behavior. cas_set_multicast uses
3193 * spin_lock_irqsave, but we are called only in cas_init_hw and
3194 * cas_init_hw is protected by cas_lock_all, which calls
3195 * spin_lock_irq (so it doesn't need to save the flags, and
	 * we should be OK for the writel, as that is the only
	 * difference).
	 */
3199 cp->mac_rx_cfg = rxcfg = cas_setup_multicast(cp);
3200 writel(rxcfg, cp->regs + REG_MAC_RX_CFG);
3202 spin_lock(&cp->stat_lock[N_TX_RINGS]);
3203 cas_clear_mac_err(cp);
3204 spin_unlock(&cp->stat_lock[N_TX_RINGS]);
3206 /* Setup MAC interrupts. We want to get all of the interesting
3207 * counter expiration events, but we do not want to hear about
3208 * normal rx/tx as the DMA engine tells us that.
3210 writel(MAC_TX_FRAME_XMIT, cp->regs + REG_MAC_TX_MASK);
3211 writel(MAC_RX_FRAME_RECV, cp->regs + REG_MAC_RX_MASK);
3213 /* Don't enable even the PAUSE interrupts for now, we
3214 * make no use of those events other than to record them.
3216 writel(0xffffffff, cp->regs + REG_MAC_CTRL_MASK);
3219 /* Must be invoked under cp->lock. */
3220 static void cas_init_pause_thresholds(struct cas *cp)
3222 /* Calculate pause thresholds. Setting the OFF threshold to the
3223 * full RX fifo size effectively disables PAUSE generation
3225 if (cp->rx_fifo_size <= (2 * 1024)) {
3226 cp->rx_pause_off = cp->rx_pause_on = cp->rx_fifo_size;
3228 int max_frame = (cp->dev->mtu + ETH_HLEN + 4 + 4 + 64) & ~63;
3229 if (max_frame * 3 > cp->rx_fifo_size) {
3230 cp->rx_pause_off = 7104;
3231 cp->rx_pause_on = 960;
3233 int off = (cp->rx_fifo_size - (max_frame * 2));
3234 int on = off - max_frame;
3235 cp->rx_pause_off = off;
3236 cp->rx_pause_on = on;
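	/* Editor's note: a worked example, assuming a 16 kB RX FIFO and a
	 * 1500-byte MTU.  max_frame = (1500 + 14 + 4 + 4 + 64) & ~63 =
	 * 1536, and 3 * 1536 fits in 16384, so off = 16384 - 2 * 1536 =
	 * 13312 and on = 13312 - 1536 = 11776: XOFF goes out when less
	 * than two max frames of space remain, XON once a further frame's
	 * worth has drained.
	 */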
3241 static int cas_vpd_match(const void __iomem *p, const char *str)
3243 int len = strlen(str) + 1;
3246 for (i = 0; i < len; i++) {
3247 if (readb(p + i) != str[i])
3254 /* get the mac address by reading the vpd information in the rom.
3255 * also get the phy type and determine if there's an entropy generator.
3256 * NOTE: this is a bit convoluted for the following reasons:
3257 * 1) vpd info has order-dependent mac addresses for multinic cards
 * 2) the only way to determine the nic order is to use the slot
 *    number.
 * 3) fiber cards don't have bridges, so their slot numbers don't
 *    mean anything.
 * 4) we don't actually know we have a fiber card until after
 *    the mac addresses are parsed.
 */
3265 static int cas_get_vpd_info(struct cas *cp, unsigned char *dev_addr,
3268 void __iomem *p = cp->regs + REG_EXPANSION_ROM_RUN_START;
3269 void __iomem *base, *kstart;
3272 #define VPD_FOUND_MAC 0x01
3273 #define VPD_FOUND_PHY 0x02
3275 int phy_type = CAS_PHY_MII_MDIO0; /* default phy type */
3278 /* give us access to the PROM */
3279 writel(BIM_LOCAL_DEV_PROM | BIM_LOCAL_DEV_PAD,
3280 cp->regs + REG_BIM_LOCAL_DEV_EN);
3282 /* check for an expansion rom */
3283 if (readb(p) != 0x55 || readb(p + 1) != 0xaa)
3284 goto use_random_mac_addr;
3286 /* search for beginning of vpd */
3288 for (i = 2; i < EXPANSION_ROM_SIZE; i++) {
3289 /* check for PCIR */
3290 if ((readb(p + i + 0) == 0x50) &&
3291 (readb(p + i + 1) == 0x43) &&
3292 (readb(p + i + 2) == 0x49) &&
3293 (readb(p + i + 3) == 0x52)) {
3294 base = p + (readb(p + i + 8) |
3295 (readb(p + i + 9) << 8));
3300 if (!base || (readb(base) != 0x82))
3301 goto use_random_mac_addr;
3303 i = (readb(base + 1) | (readb(base + 2) << 8)) + 3;
3304 while (i < EXPANSION_ROM_SIZE) {
3305 if (readb(base + i) != 0x90) /* no vpd found */
3306 goto use_random_mac_addr;
3308 /* found a vpd field */
3309 len = readb(base + i + 1) | (readb(base + i + 2) << 8);
3311 /* extract keywords */
3312 kstart = base + i + 3;
3314 while ((p - kstart) < len) {
3315 int klen = readb(p + 2);
3321 /* look for the following things:
3322 * -- correct length == 29
3323 * 3 (type) + 2 (size) +
3324 * 18 (strlen("local-mac-address") + 1) +
3326 * -- VPD Instance 'I'
3327 * -- VPD Type Bytes 'B'
3328 * -- VPD data length == 6
3329 * -- property string == local-mac-address
3331 * -- correct length == 24
3332 * 3 (type) + 2 (size) +
3333 * 12 (strlen("entropy-dev") + 1) +
3334 * 7 (strlen("vms110") + 1)
3335 * -- VPD Instance 'I'
3336 * -- VPD Type String 'B'
3337 * -- VPD data length == 7
3338 * -- property string == entropy-dev
3340 * -- correct length == 18
3341 * 3 (type) + 2 (size) +
3342 * 9 (strlen("phy-type") + 1) +
3343 * 4 (strlen("pcs") + 1)
3344 * -- VPD Instance 'I'
3345 * -- VPD Type String 'S'
3346 * -- VPD data length == 4
3347 * -- property string == phy-type
3349 * -- correct length == 23
3350 * 3 (type) + 2 (size) +
3351 * 14 (strlen("phy-interface") + 1) +
3352 * 4 (strlen("pcs") + 1)
3353 * -- VPD Instance 'I'
3354 * -- VPD Type String 'S'
3355 * -- VPD data length == 4
3356 * -- property string == phy-interface
3358 if (readb(p) != 'I')
3361 /* finally, check string and length */
3362 type = readb(p + 3);
3364 if ((klen == 29) && readb(p + 4) == 6 &&
3365 cas_vpd_match(p + 5,
3366 "local-mac-address")) {
3367 if (mac_off++ > offset)
3370 /* set mac address */
3371 for (j = 0; j < 6; j++)
3381 #ifdef USE_ENTROPY_DEV
3383 cas_vpd_match(p + 5, "entropy-dev") &&
3384 cas_vpd_match(p + 17, "vms110")) {
3385 cp->cas_flags |= CAS_FLAG_ENTROPY_DEV;
3390 if (found & VPD_FOUND_PHY)
3393 if ((klen == 18) && readb(p + 4) == 4 &&
3394 cas_vpd_match(p + 5, "phy-type")) {
3395 if (cas_vpd_match(p + 14, "pcs")) {
3396 phy_type = CAS_PHY_SERDES;
3401 if ((klen == 23) && readb(p + 4) == 4 &&
3402 cas_vpd_match(p + 5, "phy-interface")) {
3403 if (cas_vpd_match(p + 19, "pcs")) {
3404 phy_type = CAS_PHY_SERDES;
3409 found |= VPD_FOUND_MAC;
3413 found |= VPD_FOUND_PHY;
3421 use_random_mac_addr:
	if (found & VPD_FOUND_MAC)
		goto done;
3425 /* Sun MAC prefix then 3 random bytes. */
3426 printk(PFX "MAC address not found in ROM VPD\n");
	dev_addr[0] = 0x08;
	dev_addr[1] = 0x00;
	dev_addr[2] = 0x20;
	get_random_bytes(dev_addr + 3, 3);
3433 writel(0, cp->regs + REG_BIM_LOCAL_DEV_EN);
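/* Editor's note: the scan above follows the standard PCI expansion ROM
 * layout -- bytes 0x55 0xaa sign the ROM header, the ASCII tag "PCIR"
 * marks the PCI data structure, and the two bytes at PCIR offset 8/9
 * (read little-endian above) point at the VPD area, whose first byte
 * 0x82 is the large-resource tag for an identifier string.
 */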
3437 /* check pci invariants */
3438 static void cas_check_pci_invariants(struct cas *cp)
3440 struct pci_dev *pdev = cp->pdev;
3443 if ((pdev->vendor == PCI_VENDOR_ID_SUN) &&
3444 (pdev->device == PCI_DEVICE_ID_SUN_CASSINI)) {
3445 if (pdev->revision >= CAS_ID_REVPLUS)
3446 cp->cas_flags |= CAS_FLAG_REG_PLUS;
3447 if (pdev->revision < CAS_ID_REVPLUS02u)
3448 cp->cas_flags |= CAS_FLAG_TARGET_ABORT;
3450 /* Original Cassini supports HW CSUM, but it's not
3451 * enabled by default as it can trigger TX hangs.
3453 if (pdev->revision < CAS_ID_REV2)
3454 cp->cas_flags |= CAS_FLAG_NO_HW_CSUM;
3456 /* Only sun has original cassini chips. */
3457 cp->cas_flags |= CAS_FLAG_REG_PLUS;
	/* We use a flag because the same phy might be externally
	 * connected.
	 */
3462 if ((pdev->vendor == PCI_VENDOR_ID_NS) &&
3463 (pdev->device == PCI_DEVICE_ID_NS_SATURN))
3464 cp->cas_flags |= CAS_FLAG_SATURN;
3469 static int cas_check_invariants(struct cas *cp)
3471 struct pci_dev *pdev = cp->pdev;
3475 /* get page size for rx buffers. */
3477 #ifdef USE_PAGE_ORDER
3478 if (PAGE_SHIFT < CAS_JUMBO_PAGE_SHIFT) {
3479 /* see if we can allocate larger pages */
3480 struct page *page = alloc_pages(GFP_ATOMIC,
						CAS_JUMBO_PAGE_SHIFT -
						PAGE_SHIFT);
		if (page) {
			__free_pages(page, CAS_JUMBO_PAGE_SHIFT - PAGE_SHIFT);
			cp->page_order = CAS_JUMBO_PAGE_SHIFT - PAGE_SHIFT;
		} else {
			printk(PFX "MTU limited to %d bytes\n", CAS_MAX_MTU);
		}
3491 cp->page_size = (PAGE_SIZE << cp->page_order);
3493 /* Fetch the FIFO configurations. */
3494 cp->tx_fifo_size = readl(cp->regs + REG_TX_FIFO_SIZE) * 64;
3495 cp->rx_fifo_size = RX_FIFO_SIZE;
3497 /* finish phy determination. MDIO1 takes precedence over MDIO0 if
3498 * they're both connected.
3500 cp->phy_type = cas_get_vpd_info(cp, cp->dev->dev_addr,
3501 PCI_SLOT(pdev->devfn));
3502 if (cp->phy_type & CAS_PHY_SERDES) {
3503 cp->cas_flags |= CAS_FLAG_1000MB_CAP;
3504 return 0; /* no more checking needed */
3508 cfg = readl(cp->regs + REG_MIF_CFG);
3509 if (cfg & MIF_CFG_MDIO_1) {
3510 cp->phy_type = CAS_PHY_MII_MDIO1;
3511 } else if (cfg & MIF_CFG_MDIO_0) {
3512 cp->phy_type = CAS_PHY_MII_MDIO0;
3515 cas_mif_poll(cp, 0);
3516 writel(PCS_DATAPATH_MODE_MII, cp->regs + REG_PCS_DATAPATH_MODE);
3518 for (i = 0; i < 32; i++) {
3522 for (j = 0; j < 3; j++) {
3524 phy_id = cas_phy_read(cp, MII_PHYSID1) << 16;
3525 phy_id |= cas_phy_read(cp, MII_PHYSID2);
3526 if (phy_id && (phy_id != 0xFFFFFFFF)) {
3527 cp->phy_id = phy_id;
3532 printk(KERN_ERR PFX "MII phy did not respond [%08x]\n",
3533 readl(cp->regs + REG_MIF_STATE_MACHINE));
3537 /* see if we can do gigabit */
3538 cfg = cas_phy_read(cp, MII_BMSR);
3539 if ((cfg & CAS_BMSR_1000_EXTEND) &&
3540 cas_phy_read(cp, CAS_MII_1000_EXTEND))
3541 cp->cas_flags |= CAS_FLAG_1000MB_CAP;
3545 /* Must be invoked under cp->lock. */
3546 static inline void cas_start_dma(struct cas *cp)
3553 val = readl(cp->regs + REG_TX_CFG) | TX_CFG_DMA_EN;
3554 writel(val, cp->regs + REG_TX_CFG);
3555 val = readl(cp->regs + REG_RX_CFG) | RX_CFG_DMA_EN;
3556 writel(val, cp->regs + REG_RX_CFG);
3558 /* enable the mac */
3559 val = readl(cp->regs + REG_MAC_TX_CFG) | MAC_TX_CFG_EN;
3560 writel(val, cp->regs + REG_MAC_TX_CFG);
3561 val = readl(cp->regs + REG_MAC_RX_CFG) | MAC_RX_CFG_EN;
3562 writel(val, cp->regs + REG_MAC_RX_CFG);
3566 val = readl(cp->regs + REG_MAC_TX_CFG);
3567 if ((val & MAC_TX_CFG_EN))
	if (i < 0)
		txfailed = 1;
3574 val = readl(cp->regs + REG_MAC_RX_CFG);
3575 if ((val & MAC_RX_CFG_EN)) {
3578 "%s: enabling mac failed [tx:%08x:%08x].\n",
		       cp->dev->name,
		       readl(cp->regs + REG_MIF_STATE_MACHINE),
3581 readl(cp->regs + REG_MAC_STATE_MACHINE));
3583 goto enable_rx_done;
3587 printk(KERN_ERR "%s: enabling mac failed [%s:%08x:%08x].\n",
	       cp->dev->name,
	       (txfailed ? "tx,rx" : "rx"),
3590 readl(cp->regs + REG_MIF_STATE_MACHINE),
3591 readl(cp->regs + REG_MAC_STATE_MACHINE));
3594 cas_unmask_intr(cp); /* enable interrupts */
3595 writel(RX_DESC_RINGN_SIZE(0) - 4, cp->regs + REG_RX_KICK);
3596 writel(0, cp->regs + REG_RX_COMP_TAIL);
3598 if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
3599 if (N_RX_DESC_RINGS > 1)
3600 writel(RX_DESC_RINGN_SIZE(1) - 4,
3601 cp->regs + REG_PLUS_RX_KICK1);
3603 for (i = 1; i < N_RX_COMP_RINGS; i++)
3604 writel(0, cp->regs + REG_PLUS_RX_COMPN_TAIL(i));
3608 /* Must be invoked under cp->lock. */
3609 static void cas_read_pcs_link_mode(struct cas *cp, int *fd, int *spd,
3612 u32 val = readl(cp->regs + REG_PCS_MII_LPA);
3613 *fd = (val & PCS_MII_LPA_FD) ? 1 : 0;
3614 *pause = (val & PCS_MII_LPA_SYM_PAUSE) ? 0x01 : 0x00;
	if (val & PCS_MII_LPA_ASYM_PAUSE)
		*pause |= 0x10;
3620 /* Must be invoked under cp->lock. */
3621 static void cas_read_mii_link_mode(struct cas *cp, int *fd, int *spd,
3630 /* use GMII registers */
3631 val = cas_phy_read(cp, MII_LPA);
	if (val & CAS_LPA_PAUSE)
		*pause = 0x01;

	if (val & CAS_LPA_ASYM_PAUSE)
		*pause |= 0x10;

	if (val & LPA_DUPLEX)
		*fd = 1;
3643 if (cp->cas_flags & CAS_FLAG_1000MB_CAP) {
3644 val = cas_phy_read(cp, CAS_MII_1000_STATUS);
		if (val & (CAS_LPA_1000FULL | CAS_LPA_1000HALF))
			*spd = 1000;
		if (val & CAS_LPA_1000FULL)
			*fd = 1;
/* A link-up condition has occurred, initialize and enable the
 * rest of the chip.
 *
 * Must be invoked under cp->lock.
 */
3657 static void cas_set_link_modes(struct cas *cp)
3660 int full_duplex, speed, pause;
3666 if (CAS_PHY_MII(cp->phy_type)) {
3667 cas_mif_poll(cp, 0);
3668 val = cas_phy_read(cp, MII_BMCR);
3669 if (val & BMCR_ANENABLE) {
			cas_read_mii_link_mode(cp, &full_duplex, &speed,
					       &pause);
		} else {
			if (val & BMCR_FULLDPLX)
				full_duplex = 1;

			if (val & BMCR_SPEED100)
				speed = 100;
			else if (val & CAS_BMCR_SPEED1000)
				speed = (cp->cas_flags & CAS_FLAG_1000MB_CAP) ?
					1000 : 100;
		}
3682 cas_mif_poll(cp, 1);
3685 val = readl(cp->regs + REG_PCS_MII_CTRL);
3686 cas_read_pcs_link_mode(cp, &full_duplex, &speed, &pause);
3687 if ((val & PCS_MII_AUTONEG_EN) == 0) {
			if (val & PCS_MII_CTRL_DUPLEX)
				full_duplex = 1;
3693 if (netif_msg_link(cp))
3694 printk(KERN_INFO "%s: Link up at %d Mbps, %s-duplex.\n",
3695 cp->dev->name, speed, (full_duplex ? "full" : "half"));
3697 val = MAC_XIF_TX_MII_OUTPUT_EN | MAC_XIF_LINK_LED;
3698 if (CAS_PHY_MII(cp->phy_type)) {
3699 val |= MAC_XIF_MII_BUFFER_OUTPUT_EN;
		if (!full_duplex)
			val |= MAC_XIF_DISABLE_ECHO;
	}
	if (full_duplex)
		val |= MAC_XIF_FDPLX_LED;
	if (speed == 1000)
		val |= MAC_XIF_GMII_MODE;
3707 writel(val, cp->regs + REG_MAC_XIF_CFG);
3709 /* deal with carrier and collision detect. */
3710 val = MAC_TX_CFG_IPG_EN;
	if (full_duplex) {
		val |= MAC_TX_CFG_IGNORE_CARRIER;
		val |= MAC_TX_CFG_IGNORE_COLL;
	} else {
#ifndef USE_CSMA_CD_PROTO
		val |= MAC_TX_CFG_NEVER_GIVE_UP_EN;
		val |= MAC_TX_CFG_NEVER_GIVE_UP_LIM;
#endif
	}
3720 /* val now set up for REG_MAC_TX_CFG */
3722 /* If gigabit and half-duplex, enable carrier extension
3723 * mode. increase slot time to 512 bytes as well.
3724 * else, disable it and make sure slot time is 64 bytes.
3725 * also activate checksum bug workaround
3727 if ((speed == 1000) && !full_duplex) {
3728 writel(val | MAC_TX_CFG_CARRIER_EXTEND,
3729 cp->regs + REG_MAC_TX_CFG);
3731 val = readl(cp->regs + REG_MAC_RX_CFG);
3732 val &= ~MAC_RX_CFG_STRIP_FCS; /* checksum workaround */
3733 writel(val | MAC_RX_CFG_CARRIER_EXTEND,
3734 cp->regs + REG_MAC_RX_CFG);
3736 writel(0x200, cp->regs + REG_MAC_SLOT_TIME);
3739 /* minimum size gigabit frame at half duplex */
3740 cp->min_frame_size = CAS_1000MB_MIN_FRAME;
3743 writel(val, cp->regs + REG_MAC_TX_CFG);
	/* checksum bug workaround. don't strip FCS when in
	 * half-duplex mode
	 */
3748 val = readl(cp->regs + REG_MAC_RX_CFG);
		if (full_duplex) {
			val |= MAC_RX_CFG_STRIP_FCS;
			cp->crc_size = 0;
			cp->min_frame_size = CAS_MIN_MTU;
		} else {
			val &= ~MAC_RX_CFG_STRIP_FCS;
			cp->crc_size = 4;
			cp->min_frame_size = CAS_MIN_FRAME;
		}
3758 writel(val & ~MAC_RX_CFG_CARRIER_EXTEND,
3759 cp->regs + REG_MAC_RX_CFG);
3760 writel(0x40, cp->regs + REG_MAC_SLOT_TIME);
3763 if (netif_msg_link(cp)) {
		if (pause & 0x01) {
			printk(KERN_INFO "%s: Pause is enabled "
			       "(rxfifo: %d off: %d on: %d)\n",
			       cp->dev->name,
			       cp->rx_fifo_size,
			       cp->rx_pause_off,
			       cp->rx_pause_on);
3771 } else if (pause & 0x10) {
			printk(KERN_INFO "%s: TX pause enabled\n",
			       cp->dev->name);
		} else {
3775 printk(KERN_INFO "%s: Pause is disabled\n",
3780 val = readl(cp->regs + REG_MAC_CTRL_CFG);
3781 val &= ~(MAC_CTRL_CFG_SEND_PAUSE_EN | MAC_CTRL_CFG_RECV_PAUSE_EN);
3782 if (pause) { /* symmetric or asymmetric pause */
3783 val |= MAC_CTRL_CFG_SEND_PAUSE_EN;
3784 if (pause & 0x01) { /* symmetric pause */
3785 val |= MAC_CTRL_CFG_RECV_PAUSE_EN;
3788 writel(val, cp->regs + REG_MAC_CTRL_CFG);
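/* Editor's note: throughout this file "pause" is a small bitmask --
 * 0x01 means symmetric pause was negotiated (send and honor PAUSE
 * frames, so both the SEND and RECV enables go on above), while 0x10
 * alone means asymmetric TX-only pause (we may send PAUSE frames but do
 * not honor incoming ones), matching the "TX pause enabled" report in
 * the link message code above.
 */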
3792 /* Must be invoked under cp->lock. */
3793 static void cas_init_hw(struct cas *cp, int restart_link)
3798 cas_init_pause_thresholds(cp);
3803 /* Default aneg parameters */
3804 cp->timer_ticks = 0;
3805 cas_begin_auto_negotiation(cp, NULL);
3806 } else if (cp->lstate == link_up) {
3807 cas_set_link_modes(cp);
3808 netif_carrier_on(cp->dev);
3812 /* Must be invoked under cp->lock. on earlier cassini boards,
3813 * SOFT_0 is tied to PCI reset. we use this to force a pci reset,
3814 * let it settle out, and then restore pci state.
3816 static void cas_hard_reset(struct cas *cp)
3818 writel(BIM_LOCAL_DEV_SOFT_0, cp->regs + REG_BIM_LOCAL_DEV_EN);
3820 pci_restore_state(cp->pdev);
3824 static void cas_global_reset(struct cas *cp, int blkflag)
3828 /* issue a global reset. don't use RSTOUT. */
3829 if (blkflag && !CAS_PHY_MII(cp->phy_type)) {
		/* For PCS, when the blkflag is set, we should set the
		 * SW_RESET_BLOCK_PCS_SLINK bit to prevent the results of
		 * the last autonegotiation from being cleared.  We'll
		 * need some special handling if the chip is set into a
		 * loopback mode.
		 */
3836 writel((SW_RESET_TX | SW_RESET_RX | SW_RESET_BLOCK_PCS_SLINK),
3837 cp->regs + REG_SW_RESET);
3839 writel(SW_RESET_TX | SW_RESET_RX, cp->regs + REG_SW_RESET);
3842 /* need to wait at least 3ms before polling register */
3846 while (limit-- > 0) {
3847 u32 val = readl(cp->regs + REG_SW_RESET);
		if ((val & (SW_RESET_TX | SW_RESET_RX)) == 0)
			goto done;
		udelay(10);
	}
3852 printk(KERN_ERR "%s: sw reset failed.\n", cp->dev->name);
3855 /* enable various BIM interrupts */
3856 writel(BIM_CFG_DPAR_INTR_ENABLE | BIM_CFG_RMA_INTR_ENABLE |
3857 BIM_CFG_RTA_INTR_ENABLE, cp->regs + REG_BIM_CFG);
3859 /* clear out pci error status mask for handled errors.
	 * we don't deal with DMA counter overflows as they happen
	 * all the time.
	 */
3863 writel(0xFFFFFFFFU & ~(PCI_ERR_BADACK | PCI_ERR_DTRTO |
3864 PCI_ERR_OTHER | PCI_ERR_BIM_DMA_WRITE |
3865 PCI_ERR_BIM_DMA_READ), cp->regs +
3866 REG_PCI_ERR_STATUS_MASK);
	/* set up for MII by default to address mac rx reset timeout
	 * issue
	 */
3871 writel(PCS_DATAPATH_MODE_MII, cp->regs + REG_PCS_DATAPATH_MODE);
3874 static void cas_reset(struct cas *cp, int blkflag)
3879 cas_global_reset(cp, blkflag);
3881 cas_entropy_reset(cp);
3883 /* disable dma engines. */
3884 val = readl(cp->regs + REG_TX_CFG);
3885 val &= ~TX_CFG_DMA_EN;
3886 writel(val, cp->regs + REG_TX_CFG);
3888 val = readl(cp->regs + REG_RX_CFG);
3889 val &= ~RX_CFG_DMA_EN;
3890 writel(val, cp->regs + REG_RX_CFG);
3892 /* program header parser */
3893 if ((cp->cas_flags & CAS_FLAG_TARGET_ABORT) ||
3894 (CAS_HP_ALT_FIRMWARE == cas_prog_null)) {
3895 cas_load_firmware(cp, CAS_HP_FIRMWARE);
3897 cas_load_firmware(cp, CAS_HP_ALT_FIRMWARE);
3900 /* clear out error registers */
3901 spin_lock(&cp->stat_lock[N_TX_RINGS]);
3902 cas_clear_mac_err(cp);
3903 spin_unlock(&cp->stat_lock[N_TX_RINGS]);
3906 /* Shut down the chip, must be called with pm_mutex held. */
3907 static void cas_shutdown(struct cas *cp)
3909 unsigned long flags;
3911 /* Make us not-running to avoid timers respawning */
3914 del_timer_sync(&cp->link_timer);
3916 /* Stop the reset task */
3918 while (atomic_read(&cp->reset_task_pending_mtu) ||
3919 atomic_read(&cp->reset_task_pending_spare) ||
3920 atomic_read(&cp->reset_task_pending_all))
3924 while (atomic_read(&cp->reset_task_pending))
3927 /* Actually stop the chip */
3928 cas_lock_all_save(cp, flags);
3930 if (cp->cas_flags & CAS_FLAG_SATURN)
3931 cas_phy_powerdown(cp);
3932 cas_unlock_all_restore(cp, flags);
3935 static int cas_change_mtu(struct net_device *dev, int new_mtu)
3937 struct cas *cp = netdev_priv(dev);
	if (new_mtu < CAS_MIN_MTU || new_mtu > CAS_MAX_MTU)
		return -EINVAL;

	dev->mtu = new_mtu;
	if (!netif_running(dev) || !netif_device_present(dev))
		return 0;
3946 /* let the reset task handle it */
#if 1
	atomic_inc(&cp->reset_task_pending);
	if ((cp->phy_type & CAS_PHY_SERDES)) {
		atomic_inc(&cp->reset_task_pending_all);
	} else {
		atomic_inc(&cp->reset_task_pending_mtu);
	}
	schedule_work(&cp->reset_task);
#else
	atomic_set(&cp->reset_task_pending, (cp->phy_type & CAS_PHY_SERDES) ?
		   CAS_RESET_ALL : CAS_RESET_MTU);
	printk(KERN_ERR "reset called in cas_change_mtu\n");
	schedule_work(&cp->reset_task);
#endif
3962 flush_scheduled_work();
3966 static void cas_clean_txd(struct cas *cp, int ring)
3968 struct cas_tx_desc *txd = cp->init_txds[ring];
3969 struct sk_buff *skb, **skbs = cp->tx_skbs[ring];
3973 size = TX_DESC_RINGN_SIZE(ring);
3974 for (i = 0; i < size; i++) {
3977 if (skbs[i] == NULL)
3983 for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
3984 int ent = i & (size - 1);
3986 /* first buffer is never a tiny buffer and so
3987 * needs to be unmapped.
3989 daddr = le64_to_cpu(txd[ent].buffer);
3990 dlen = CAS_VAL(TX_DESC_BUFLEN,
3991 le64_to_cpu(txd[ent].control));
3992 pci_unmap_page(cp->pdev, daddr, dlen,
3995 if (frag != skb_shinfo(skb)->nr_frags) {
			/* next buffer might be a tiny buffer.
			 * skip past it.
			 */
4001 ent = i & (size - 1);
4002 if (cp->tx_tiny_use[ring][ent].used)
4006 dev_kfree_skb_any(skb);
4009 /* zero out tiny buf usage */
4010 memset(cp->tx_tiny_use[ring], 0, size*sizeof(*cp->tx_tiny_use[ring]));
4013 /* freed on close */
4014 static inline void cas_free_rx_desc(struct cas *cp, int ring)
4016 cas_page_t **page = cp->rx_pages[ring];
4019 size = RX_DESC_RINGN_SIZE(ring);
4020 for (i = 0; i < size; i++) {
4022 cas_page_free(cp, page[i]);
4028 static void cas_free_rxds(struct cas *cp)
4032 for (i = 0; i < N_RX_DESC_RINGS; i++)
4033 cas_free_rx_desc(cp, i);
4036 /* Must be invoked under cp->lock. */
4037 static void cas_clean_rings(struct cas *cp)
4041 /* need to clean all tx rings */
4042 memset(cp->tx_old, 0, sizeof(*cp->tx_old)*N_TX_RINGS);
4043 memset(cp->tx_new, 0, sizeof(*cp->tx_new)*N_TX_RINGS);
4044 for (i = 0; i < N_TX_RINGS; i++)
4045 cas_clean_txd(cp, i);
4047 /* zero out init block */
4048 memset(cp->init_block, 0, sizeof(struct cas_init_block));
4053 /* allocated on open */
4054 static inline int cas_alloc_rx_desc(struct cas *cp, int ring)
4056 cas_page_t **page = cp->rx_pages[ring];
4059 size = RX_DESC_RINGN_SIZE(ring);
4060 for (i = 0; i < size; i++) {
4061 if ((page[i] = cas_page_alloc(cp, GFP_KERNEL)) == NULL)
4067 static int cas_alloc_rxds(struct cas *cp)
4071 for (i = 0; i < N_RX_DESC_RINGS; i++) {
4072 if (cas_alloc_rx_desc(cp, i) < 0) {
4080 static void cas_reset_task(struct work_struct *work)
4082 struct cas *cp = container_of(work, struct cas, reset_task);
4084 int pending = atomic_read(&cp->reset_task_pending);
4086 int pending_all = atomic_read(&cp->reset_task_pending_all);
4087 int pending_spare = atomic_read(&cp->reset_task_pending_spare);
4088 int pending_mtu = atomic_read(&cp->reset_task_pending_mtu);
4090 if (pending_all == 0 && pending_spare == 0 && pending_mtu == 0) {
		/* We can have more tasks scheduled than actually
		 * handled.
		 */
4094 atomic_dec(&cp->reset_task_pending);
4098 /* The link went down, we reset the ring, but keep
4099 * DMA stopped. Use this function for reset
4102 if (cp->hw_running) {
4103 unsigned long flags;
4105 /* Make sure we don't get interrupts or tx packets */
4106 netif_device_detach(cp->dev);
4107 cas_lock_all_save(cp, flags);
		/* We call cas_spare_recover when we call cas_open, but
		 * we do not initialize the lists cas_spare_recover
		 * uses until cas_open is called.
		 */
4114 cas_spare_recover(cp, GFP_ATOMIC);
4117 /* test => only pending_spare set */
4118 if (!pending_all && !pending_mtu)
4121 if (pending == CAS_RESET_SPARE)
4124 /* when pending == CAS_RESET_ALL, the following
4125 * call to cas_init_hw will restart auto negotiation.
4126 * Setting the second argument of cas_reset to
4127 * !(pending == CAS_RESET_ALL) will set this argument
4128 * to 1 (avoiding reinitializing the PHY for the normal
4129 * PCS case) when auto negotiation is not restarted.
4132 cas_reset(cp, !(pending_all > 0));
4134 cas_clean_rings(cp);
4135 cas_init_hw(cp, (pending_all > 0));
4137 cas_reset(cp, !(pending == CAS_RESET_ALL));
4139 cas_clean_rings(cp);
4140 cas_init_hw(cp, pending == CAS_RESET_ALL);
4144 cas_unlock_all_restore(cp, flags);
4145 netif_device_attach(cp->dev);
4148 atomic_sub(pending_all, &cp->reset_task_pending_all);
4149 atomic_sub(pending_spare, &cp->reset_task_pending_spare);
4150 atomic_sub(pending_mtu, &cp->reset_task_pending_mtu);
4151 atomic_dec(&cp->reset_task_pending);
4153 atomic_set(&cp->reset_task_pending, 0);
4157 static void cas_link_timer(unsigned long data)
4159 struct cas *cp = (struct cas *) data;
4160 int mask, pending = 0, reset = 0;
4161 unsigned long flags;
4163 if (link_transition_timeout != 0 &&
4164 cp->link_transition_jiffies_valid &&
4165 ((jiffies - cp->link_transition_jiffies) >
4166 (link_transition_timeout))) {
4167 /* One-second counter so link-down workaround doesn't
4168 * cause resets to occur so fast as to fool the switch
4169 * into thinking the link is down.
4171 cp->link_transition_jiffies_valid = 0;
4174 if (!cp->hw_running)
4177 spin_lock_irqsave(&cp->lock, flags);
4179 cas_entropy_gather(cp);
4181 /* If the link task is still pending, we just
4182 * reschedule the link timer
4185 if (atomic_read(&cp->reset_task_pending_all) ||
4186 atomic_read(&cp->reset_task_pending_spare) ||
4187 atomic_read(&cp->reset_task_pending_mtu))
4190 if (atomic_read(&cp->reset_task_pending))
4194 /* check for rx cleaning */
4195 if ((mask = (cp->cas_flags & CAS_FLAG_RXD_POST_MASK))) {
4198 for (i = 0; i < MAX_RX_DESC_RINGS; i++) {
4199 rmask = CAS_FLAG_RXD_POST(i);
4200 if ((mask & rmask) == 0)
4203 /* post_rxds will do a mod_timer */
4204 if (cas_post_rxds_ringN(cp, i, cp->rx_last[i]) < 0) {
4208 cp->cas_flags &= ~rmask;
4212 if (CAS_PHY_MII(cp->phy_type)) {
4214 cas_mif_poll(cp, 0);
4215 bmsr = cas_phy_read(cp, MII_BMSR);
4216 /* WTZ: Solaris driver reads this twice, but that
4217 * may be due to the PCS case and the use of a
		 * common implementation. Read it twice here to be
		 * safe.
		 */
4221 bmsr = cas_phy_read(cp, MII_BMSR);
4222 cas_mif_poll(cp, 1);
4223 readl(cp->regs + REG_MIF_STATUS); /* avoid dups */
4224 reset = cas_mii_link_check(cp, bmsr);
4226 reset = cas_pcs_link_check(cp);
4232 /* check for tx state machine confusion */
4233 if ((readl(cp->regs + REG_MAC_TX_STATUS) & MAC_TX_FRAME_XMIT) == 0) {
4234 u32 val = readl(cp->regs + REG_MAC_STATE_MACHINE);
4236 int tlm = CAS_VAL(MAC_SM_TLM, val);
4238 if (((tlm == 0x5) || (tlm == 0x3)) &&
4239 (CAS_VAL(MAC_SM_ENCAP_SM, val) == 0)) {
4240 if (netif_msg_tx_err(cp))
4241 printk(KERN_DEBUG "%s: tx err: "
4242 "MAC_STATE[%08x]\n",
4243 cp->dev->name, val);
4248 val = readl(cp->regs + REG_TX_FIFO_PKT_CNT);
4249 wptr = readl(cp->regs + REG_TX_FIFO_WRITE_PTR);
4250 rptr = readl(cp->regs + REG_TX_FIFO_READ_PTR);
4251 if ((val == 0) && (wptr != rptr)) {
4252 if (netif_msg_tx_err(cp))
4253 printk(KERN_DEBUG "%s: tx err: "
4254 "TX_FIFO[%08x:%08x:%08x]\n",
4255 cp->dev->name, val, wptr, rptr);
#if 1
		atomic_inc(&cp->reset_task_pending);
		atomic_inc(&cp->reset_task_pending_all);
		schedule_work(&cp->reset_task);
#else
		atomic_set(&cp->reset_task_pending, CAS_RESET_ALL);
		printk(KERN_ERR "reset called in cas_link_timer\n");
		schedule_work(&cp->reset_task);
#endif
4277 mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT);
4279 spin_unlock_irqrestore(&cp->lock, flags);
/* tiny buffers are used to avoid target abort issues with
 * older cassini's
 */
4285 static void cas_tx_tiny_free(struct cas *cp)
4287 struct pci_dev *pdev = cp->pdev;
4290 for (i = 0; i < N_TX_RINGS; i++) {
4291 if (!cp->tx_tiny_bufs[i])
4294 pci_free_consistent(pdev, TX_TINY_BUF_BLOCK,
4295 cp->tx_tiny_bufs[i],
4296 cp->tx_tiny_dvma[i]);
4297 cp->tx_tiny_bufs[i] = NULL;
4301 static int cas_tx_tiny_alloc(struct cas *cp)
4303 struct pci_dev *pdev = cp->pdev;
4306 for (i = 0; i < N_TX_RINGS; i++) {
4307 cp->tx_tiny_bufs[i] =
4308 pci_alloc_consistent(pdev, TX_TINY_BUF_BLOCK,
4309 &cp->tx_tiny_dvma[i]);
4310 if (!cp->tx_tiny_bufs[i]) {
4311 cas_tx_tiny_free(cp);
4319 static int cas_open(struct net_device *dev)
4321 struct cas *cp = netdev_priv(dev);
4323 unsigned long flags;
4325 mutex_lock(&cp->pm_mutex);
4327 hw_was_up = cp->hw_running;
4329 /* The power-management mutex protects the hw_running
4330 * etc. state so it is safe to do this bit without cp->lock
4332 if (!cp->hw_running) {
4333 /* Reset the chip */
4334 cas_lock_all_save(cp, flags);
4335 /* We set the second arg to cas_reset to zero
4336 * because cas_init_hw below will have its second
4337 * argument set to non-zero, which will force
4338 * autonegotiation to start.
4342 cas_unlock_all_restore(cp, flags);
4345 if (cas_tx_tiny_alloc(cp) < 0)
4348 /* alloc rx descriptors */
4350 if (cas_alloc_rxds(cp) < 0)
4353 /* allocate spares */
4355 cas_spare_recover(cp, GFP_KERNEL);
4357 /* We can now request the interrupt as we know it's masked
4358 * on the controller. cassini+ has up to 4 interrupts
4359 * that can be used, but you need to do explicit pci interrupt
4360 * mapping to expose them
4362 if (request_irq(cp->pdev->irq, cas_interrupt,
4363 IRQF_SHARED, dev->name, (void *) dev)) {
		printk(KERN_ERR "%s: failed to request irq !\n",
		       cp->dev->name);
4371 napi_enable(&cp->napi);
4374 cas_lock_all_save(cp, flags);
4375 cas_clean_rings(cp);
4376 cas_init_hw(cp, !hw_was_up);
4378 cas_unlock_all_restore(cp, flags);
4380 netif_start_queue(dev);
4381 mutex_unlock(&cp->pm_mutex);
4388 cas_tx_tiny_free(cp);
4389 mutex_unlock(&cp->pm_mutex);
4393 static int cas_close(struct net_device *dev)
4395 unsigned long flags;
4396 struct cas *cp = netdev_priv(dev);
4399 napi_disable(&cp->napi);
4401 /* Make sure we don't get distracted by suspend/resume */
4402 mutex_lock(&cp->pm_mutex);
4404 netif_stop_queue(dev);
4406 /* Stop traffic, mark us closed */
4407 cas_lock_all_save(cp, flags);
4411 cas_begin_auto_negotiation(cp, NULL);
4412 cas_clean_rings(cp);
4413 cas_unlock_all_restore(cp, flags);
4415 free_irq(cp->pdev->irq, (void *) dev);
4418 cas_tx_tiny_free(cp);
4419 mutex_unlock(&cp->pm_mutex);
static struct {
	const char name[ETH_GSTRING_LEN];
} ethtool_cassini_statnames[] = {
4432 {"rx_frame_errors"},
4433 {"rx_length_errors"},
4436 {"tx_aborted_errors"},
4443 #define CAS_NUM_STAT_KEYS ARRAY_SIZE(ethtool_cassini_statnames)
static struct {
	const int offsets; /* neg. values are 2nd arg to cas_phy_read */
} ethtool_register_table[] = {
4462 {REG_PCS_MII_STATUS},
4463 {REG_PCS_STATE_MACHINE},
4464 {REG_MAC_COLL_EXCESS},
4467 #define CAS_REG_LEN ARRAY_SIZE(ethtool_register_table)
4468 #define CAS_MAX_REGS (sizeof (u32)*CAS_REG_LEN)
4470 static void cas_read_regs(struct cas *cp, u8 *ptr, int len)
4474 unsigned long flags;
4476 spin_lock_irqsave(&cp->lock, flags);
	for (i = 0, p = ptr; i < len; i++, p += sizeof(u32)) {
4480 if (ethtool_register_table[i].offsets < 0) {
4481 hval = cas_phy_read(cp,
4482 -ethtool_register_table[i].offsets);
			val = readl(cp->regs + ethtool_register_table[i].offsets);
4487 memcpy(p, (u8 *)&val, sizeof(u32));
4489 spin_unlock_irqrestore(&cp->lock, flags);
4492 static struct net_device_stats *cas_get_stats(struct net_device *dev)
4494 struct cas *cp = netdev_priv(dev);
4495 struct net_device_stats *stats = cp->net_stats;
4496 unsigned long flags;
	/* we collate all of the stats into net_stats[N_TX_RINGS] */
4501 if (!cp->hw_running)
4502 return stats + N_TX_RINGS;
4504 /* collect outstanding stats */
4505 /* WTZ: the Cassini spec gives these as 16 bit counters but
4506 * stored in 32-bit words. Added a mask of 0xffff to be safe,
4507 * in case the chip somehow puts any garbage in the other bits.
	 * Also, counter usage didn't seem to match what Adrian did
	 * in the parts of the code that set these quantities. Made
	 * that consistent.
	 */
4512 spin_lock_irqsave(&cp->stat_lock[N_TX_RINGS], flags);
4513 stats[N_TX_RINGS].rx_crc_errors +=
4514 readl(cp->regs + REG_MAC_FCS_ERR) & 0xffff;
4515 stats[N_TX_RINGS].rx_frame_errors +=
		readl(cp->regs + REG_MAC_ALIGN_ERR) & 0xffff;
4517 stats[N_TX_RINGS].rx_length_errors +=
4518 readl(cp->regs + REG_MAC_LEN_ERR) & 0xffff;
4520 tmp = (readl(cp->regs + REG_MAC_COLL_EXCESS) & 0xffff) +
4521 (readl(cp->regs + REG_MAC_COLL_LATE) & 0xffff);
4522 stats[N_TX_RINGS].tx_aborted_errors += tmp;
4523 stats[N_TX_RINGS].collisions +=
4524 tmp + (readl(cp->regs + REG_MAC_COLL_NORMAL) & 0xffff);
4526 stats[N_TX_RINGS].tx_aborted_errors +=
4527 readl(cp->regs + REG_MAC_COLL_EXCESS);
4528 stats[N_TX_RINGS].collisions += readl(cp->regs + REG_MAC_COLL_EXCESS) +
4529 readl(cp->regs + REG_MAC_COLL_LATE);
4531 cas_clear_mac_err(cp);
4533 /* saved bits that are unique to ring 0 */
4534 spin_lock(&cp->stat_lock[0]);
4535 stats[N_TX_RINGS].collisions += stats[0].collisions;
4536 stats[N_TX_RINGS].rx_over_errors += stats[0].rx_over_errors;
4537 stats[N_TX_RINGS].rx_frame_errors += stats[0].rx_frame_errors;
4538 stats[N_TX_RINGS].rx_fifo_errors += stats[0].rx_fifo_errors;
4539 stats[N_TX_RINGS].tx_aborted_errors += stats[0].tx_aborted_errors;
4540 stats[N_TX_RINGS].tx_fifo_errors += stats[0].tx_fifo_errors;
4541 spin_unlock(&cp->stat_lock[0]);
4543 for (i = 0; i < N_TX_RINGS; i++) {
4544 spin_lock(&cp->stat_lock[i]);
4545 stats[N_TX_RINGS].rx_length_errors +=
4546 stats[i].rx_length_errors;
4547 stats[N_TX_RINGS].rx_crc_errors += stats[i].rx_crc_errors;
4548 stats[N_TX_RINGS].rx_packets += stats[i].rx_packets;
4549 stats[N_TX_RINGS].tx_packets += stats[i].tx_packets;
4550 stats[N_TX_RINGS].rx_bytes += stats[i].rx_bytes;
4551 stats[N_TX_RINGS].tx_bytes += stats[i].tx_bytes;
4552 stats[N_TX_RINGS].rx_errors += stats[i].rx_errors;
4553 stats[N_TX_RINGS].tx_errors += stats[i].tx_errors;
4554 stats[N_TX_RINGS].rx_dropped += stats[i].rx_dropped;
4555 stats[N_TX_RINGS].tx_dropped += stats[i].tx_dropped;
4556 memset(stats + i, 0, sizeof(struct net_device_stats));
4557 spin_unlock(&cp->stat_lock[i]);
4558 }
4559 spin_unlock_irqrestore(&cp->stat_lock[N_TX_RINGS], flags);
4560 return stats + N_TX_RINGS;
4561 }
4564 static void cas_set_multicast(struct net_device *dev)
4565 {
4566 struct cas *cp = netdev_priv(dev);
4567 u32 rxcfg, rxcfg_new;
4568 unsigned long flags;
4569 int limit = STOP_TRIES;
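/* Sequence below: quiesce the RX MAC before rewriting the hash
 * filter so no frame is matched against a half-programmed table;
 * STOP_TRIES bounds each busy-wait.
 */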
4571 if (!cp->hw_running)
4572 return;
4574 spin_lock_irqsave(&cp->lock, flags);
4575 rxcfg = readl(cp->regs + REG_MAC_RX_CFG);
4577 /* disable RX MAC and wait for completion */
4578 writel(rxcfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
4579 while (readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_EN) {
4580 if (!limit--)
4581 break;
4582 udelay(10);
4583 }
4585 /* disable hash filter and wait for completion */
4586 limit = STOP_TRIES;
4587 rxcfg &= ~(MAC_RX_CFG_PROMISC_EN | MAC_RX_CFG_HASH_FILTER_EN);
4588 writel(rxcfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
4589 while (readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_HASH_FILTER_EN) {
4590 if (!limit--)
4591 break;
4592 udelay(10);
4593 }
4595 /* program hash filters */
4596 cp->mac_rx_cfg = rxcfg_new = cas_setup_multicast(cp);
4597 rxcfg |= rxcfg_new;
4598 writel(rxcfg, cp->regs + REG_MAC_RX_CFG);
4599 spin_unlock_irqrestore(&cp->lock, flags);
4602 static void cas_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
4603 {
4604 struct cas *cp = netdev_priv(dev);
4605 strncpy(info->driver, DRV_MODULE_NAME, ETHTOOL_BUSINFO_LEN);
4606 strncpy(info->version, DRV_MODULE_VERSION, ETHTOOL_BUSINFO_LEN);
4607 info->fw_version[0] = '\0';
4608 strncpy(info->bus_info, pci_name(cp->pdev), ETHTOOL_BUSINFO_LEN);
4609 info->regdump_len = cp->casreg_len < CAS_MAX_REGS ?
4610 cp->casreg_len : CAS_MAX_REGS;
4611 info->n_stats = CAS_NUM_STAT_KEYS;
4612 }
4614 static int cas_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
4615 {
4616 struct cas *cp = netdev_priv(dev);
4617 u16 bmcr;
4618 int full_duplex, speed, pause;
4619 unsigned long flags;
4620 enum link_state linkstate = link_up;
4622 cmd->advertising = 0;
4623 cmd->supported = SUPPORTED_Autoneg;
4624 if (cp->cas_flags & CAS_FLAG_1000MB_CAP) {
4625 cmd->supported |= SUPPORTED_1000baseT_Full;
4626 cmd->advertising |= ADVERTISED_1000baseT_Full;
4627 }
4629 /* Record PHY settings if HW is on. */
4630 spin_lock_irqsave(&cp->lock, flags);
4631 bmcr = 0;
4632 linkstate = cp->lstate;
4633 if (CAS_PHY_MII(cp->phy_type)) {
4634 cmd->port = PORT_MII;
4635 cmd->transceiver = (cp->cas_flags & CAS_FLAG_SATURN) ?
4636 XCVR_INTERNAL : XCVR_EXTERNAL;
4637 cmd->phy_address = cp->phy_addr;
4638 cmd->advertising |= ADVERTISED_TP | ADVERTISED_MII |
4639 ADVERTISED_10baseT_Half |
4640 ADVERTISED_10baseT_Full |
4641 ADVERTISED_100baseT_Half |
4642 ADVERTISED_100baseT_Full;
4644 cmd->supported |=
4645 (SUPPORTED_10baseT_Half |
4646 SUPPORTED_10baseT_Full |
4647 SUPPORTED_100baseT_Half |
4648 SUPPORTED_100baseT_Full |
4649 SUPPORTED_TP | SUPPORTED_MII);
4651 if (cp->hw_running) {
4652 cas_mif_poll(cp, 0);
4653 bmcr = cas_phy_read(cp, MII_BMCR);
4654 cas_read_mii_link_mode(cp, &full_duplex,
4655 &speed, &pause);
4656 cas_mif_poll(cp, 1);
4657 }
4659 } else {
4660 cmd->port = PORT_FIBRE;
4661 cmd->transceiver = XCVR_INTERNAL;
4662 cmd->phy_address = 0;
4663 cmd->supported |= SUPPORTED_FIBRE;
4664 cmd->advertising |= ADVERTISED_FIBRE;
4666 if (cp->hw_running) {
4667 /* pcs uses the same bits as mii */
4668 bmcr = readl(cp->regs + REG_PCS_MII_CTRL);
4669 cas_read_pcs_link_mode(cp, &full_duplex,
4670 &speed, &pause);
4671 }
4672 }
4673 spin_unlock_irqrestore(&cp->lock, flags);
4675 if (bmcr & BMCR_ANENABLE) {
4676 cmd->advertising |= ADVERTISED_Autoneg;
4677 cmd->autoneg = AUTONEG_ENABLE;
4678 cmd->speed = ((speed == 10) ?
4679 SPEED_10 :
4680 ((speed == 1000) ?
4681 SPEED_1000 : SPEED_100));
4682 cmd->duplex = full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
4683 } else {
4684 cmd->autoneg = AUTONEG_DISABLE;
4685 cmd->speed =
4686 (bmcr & CAS_BMCR_SPEED1000) ?
4687 SPEED_1000 :
4688 ((bmcr & BMCR_SPEED100) ? SPEED_100 :
4689 SPEED_10);
4690 cmd->duplex =
4691 (bmcr & BMCR_FULLDPLX) ?
4692 DUPLEX_FULL : DUPLEX_HALF;
4693 }
4694 if (linkstate != link_up) {
4695 /* Force these to "unknown" if the link is not up and
4696 * autonegotiation is enabled. We can set the link
4697 * speed to 0, but not cmd->duplex,
4698 * because its legal values are 0 and 1. Ethtool will
4699 * print the value reported in parentheses after the
4700 * word "Unknown" for unrecognized values.
4702 * If in forced mode, we report the speed and duplex
4703 * settings that we configured.
4704 */
4705 if (cp->link_cntl & BMCR_ANENABLE) {
4706 cmd->speed = 0;
4707 cmd->duplex = 0xff;
4708 } else {
4709 cmd->speed = SPEED_10;
4710 if (cp->link_cntl & BMCR_SPEED100) {
4711 cmd->speed = SPEED_100;
4712 } else if (cp->link_cntl & CAS_BMCR_SPEED1000) {
4713 cmd->speed = SPEED_1000;
4714 }
4715 cmd->duplex = (cp->link_cntl & BMCR_FULLDPLX) ?
4716 DUPLEX_FULL : DUPLEX_HALF;
4717 }
4718 }
4719 return 0;
4720 }
4722 static int cas_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
4723 {
4724 struct cas *cp = netdev_priv(dev);
4725 unsigned long flags;
4727 /* Verify the settings we care about. */
4728 if (cmd->autoneg != AUTONEG_ENABLE &&
4729 cmd->autoneg != AUTONEG_DISABLE)
4730 return -EINVAL;
4732 if (cmd->autoneg == AUTONEG_DISABLE &&
4733 ((cmd->speed != SPEED_1000 &&
4734 cmd->speed != SPEED_100 &&
4735 cmd->speed != SPEED_10) ||
4736 (cmd->duplex != DUPLEX_HALF &&
4737 cmd->duplex != DUPLEX_FULL)))
4738 return -EINVAL;
4740 /* Apply settings and restart link process. */
4741 spin_lock_irqsave(&cp->lock, flags);
4742 cas_begin_auto_negotiation(cp, cmd);
4743 spin_unlock_irqrestore(&cp->lock, flags);
4744 return 0;
4745 }
4747 static int cas_nway_reset(struct net_device *dev)
4748 {
4749 struct cas *cp = netdev_priv(dev);
4750 unsigned long flags;
4752 if ((cp->link_cntl & BMCR_ANENABLE) == 0)
4753 return -EINVAL;
4755 /* Restart link process. */
4756 spin_lock_irqsave(&cp->lock, flags);
4757 cas_begin_auto_negotiation(cp, NULL);
4758 spin_unlock_irqrestore(&cp->lock, flags);
4759 return 0;
4760 }
4763 static u32 cas_get_link(struct net_device *dev)
4764 {
4765 struct cas *cp = netdev_priv(dev);
4766 return cp->lstate == link_up;
4767 }
4769 static u32 cas_get_msglevel(struct net_device *dev)
4770 {
4771 struct cas *cp = netdev_priv(dev);
4772 return cp->msg_enable;
4773 }
4775 static void cas_set_msglevel(struct net_device *dev, u32 value)
4776 {
4777 struct cas *cp = netdev_priv(dev);
4778 cp->msg_enable = value;
4779 }
4781 static int cas_get_regs_len(struct net_device *dev)
4782 {
4783 struct cas *cp = netdev_priv(dev);
4784 return cp->casreg_len < CAS_MAX_REGS ? cp->casreg_len : CAS_MAX_REGS;
4785 }
4787 static void cas_get_regs(struct net_device *dev, struct ethtool_regs *regs,
4788 void *p)
4789 {
4790 struct cas *cp = netdev_priv(dev);
4791 regs->version = 0;
4792 /* cas_read_regs handles locks (cp->lock). */
4793 cas_read_regs(cp, p, regs->len / sizeof(u32));
4794 }
4796 static int cas_get_sset_count(struct net_device *dev, int sset)
4797 {
4798 switch (sset) {
4799 case ETH_SS_STATS:
4800 return CAS_NUM_STAT_KEYS;
4801 default:
4802 return -EOPNOTSUPP;
4803 }
4804 }
4806 static void cas_get_strings(struct net_device *dev, u32 stringset, u8 *data)
4807 {
4808 memcpy(data, &ethtool_cassini_statnames,
4809 CAS_NUM_STAT_KEYS * ETH_GSTRING_LEN);
4810 }
4812 static void cas_get_ethtool_stats(struct net_device *dev,
4813 struct ethtool_stats *estats, u64 *data)
4814 {
4815 struct cas *cp = netdev_priv(dev);
4816 struct net_device_stats *stats = cas_get_stats(cp->dev);
4817 int i = 0;
4818 data[i++] = stats->collisions;
4819 data[i++] = stats->rx_bytes;
4820 data[i++] = stats->rx_crc_errors;
4821 data[i++] = stats->rx_dropped;
4822 data[i++] = stats->rx_errors;
4823 data[i++] = stats->rx_fifo_errors;
4824 data[i++] = stats->rx_frame_errors;
4825 data[i++] = stats->rx_length_errors;
4826 data[i++] = stats->rx_over_errors;
4827 data[i++] = stats->rx_packets;
4828 data[i++] = stats->tx_aborted_errors;
4829 data[i++] = stats->tx_bytes;
4830 data[i++] = stats->tx_dropped;
4831 data[i++] = stats->tx_errors;
4832 data[i++] = stats->tx_fifo_errors;
4833 data[i++] = stats->tx_packets;
4834 BUG_ON(i != CAS_NUM_STAT_KEYS);
4835 }
4837 static const struct ethtool_ops cas_ethtool_ops = {
4838 .get_drvinfo = cas_get_drvinfo,
4839 .get_settings = cas_get_settings,
4840 .set_settings = cas_set_settings,
4841 .nway_reset = cas_nway_reset,
4842 .get_link = cas_get_link,
4843 .get_msglevel = cas_get_msglevel,
4844 .set_msglevel = cas_set_msglevel,
4845 .get_regs_len = cas_get_regs_len,
4846 .get_regs = cas_get_regs,
4847 .get_sset_count = cas_get_sset_count,
4848 .get_strings = cas_get_strings,
4849 .get_ethtool_stats = cas_get_ethtool_stats,
4850 };
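/* These hooks back the ethtool(8) interface: e.g. "ethtool eth0"
 * reaches cas_get_settings() and "ethtool -S eth0" reaches
 * cas_get_ethtool_stats().
 */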
4852 static int cas_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
4853 {
4854 struct cas *cp = netdev_priv(dev);
4855 struct mii_ioctl_data *data = if_mii(ifr);
4856 unsigned long flags;
4857 int rc = -EOPNOTSUPP;
4859 /* Hold the PM mutex while doing ioctl's or we may collide
4860 * with open/close and power management and oops.
4861 */
4862 mutex_lock(&cp->pm_mutex);
4863 switch (cmd) {
4864 case SIOCGMIIPHY: /* Get address of MII PHY in use. */
4865 data->phy_id = cp->phy_addr;
4866 /* Fallthrough... */
4868 case SIOCGMIIREG: /* Read MII PHY register. */
4869 spin_lock_irqsave(&cp->lock, flags);
4870 cas_mif_poll(cp, 0);
4871 data->val_out = cas_phy_read(cp, data->reg_num & 0x1f);
4872 cas_mif_poll(cp, 1);
4873 spin_unlock_irqrestore(&cp->lock, flags);
4874 rc = 0;
4875 break;
4877 case SIOCSMIIREG: /* Write MII PHY register. */
4878 if (!capable(CAP_NET_ADMIN)) {
4879 rc = -EPERM;
4880 break;
4881 }
4882 spin_lock_irqsave(&cp->lock, flags);
4883 cas_mif_poll(cp, 0);
4884 rc = cas_phy_write(cp, data->reg_num & 0x1f, data->val_in);
4885 cas_mif_poll(cp, 1);
4886 spin_unlock_irqrestore(&cp->lock, flags);
4887 break;
4889 default:
4890 break;
4891 };
4892 mutex_unlock(&cp->pm_mutex);
4894 return rc;
4895 }
4896 /* When this chip sits underneath an Intel 31154 bridge, it is the
4897 * only subordinate device and we can tweak the bridge settings to
4898 * reflect that fact.
4899 */
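/* The tweaks below apply only when the parent bridge identifies
 * itself as the Intel 31154 (vendor 0x8086, device 0x537c); any
 * other bridge is left untouched.
 */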
4900 static void __devinit cas_program_bridge(struct pci_dev *cas_pdev)
4901 {
4902 struct pci_dev *pdev = cas_pdev->bus->self;
4903 u32 val;
4905 if (!pdev)
4906 return;
4908 if (pdev->vendor != 0x8086 || pdev->device != 0x537c)
4909 return;
4911 /* Clear bit 10 (Bus Parking Control) in the Secondary
4912 * Arbiter Control/Status Register which lives at offset
4913 * 0x41. Using a 32-bit word read/modify/write at 0x40
4914 * is much simpler so that's how we do this.
4915 */
4916 pci_read_config_dword(pdev, 0x40, &val);
4917 val &= ~0x00040000; /* bit 10 of the byte at 0x41 == bit 18 at 0x40 */
4918 pci_write_config_dword(pdev, 0x40, val);
4920 /* Max out the Multi-Transaction Timer settings since
4921 * Cassini is the only device present.
4923 * The register is 16-bit and lives at 0x50. When the
4924 * settings are enabled, it extends the GRANT# signal
4925 * for a requestor after a transaction is complete. This
4926 * allows the next request to run without first needing
4927 * to negotiate the GRANT# signal back.
4929 * Bits 12:10 define the grant duration:
4930 *
4931 * 1 -- 16 clocks
4932 * 2 -- 32 clocks
4933 * 3 -- 64 clocks
4934 * 4 -- 128 clocks
4935 * 5 -- 256 clocks
4936 *
4937 * All other values are illegal.
4939 * Bits 09:00 define which REQ/GNT signal pairs get the
4940 * GRANT# signal treatment. We set them all.
4941 */
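/* (5 << 10) selects the longest legal grant duration (256 clocks,
 * per the table above) and 0x3ff enables the treatment for all ten
 * REQ/GNT pairs.
 */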
4942 pci_write_config_word(pdev, 0x50, (5 << 10) | 0x3ff);
4944 /* The Read Prefetch Policy register is 16-bit and sits at
4945 * offset 0x52. It enables a "smart" pre-fetch policy. We
4946 * enable it and max out all of the settings since only one
4947 * device is sitting underneath and thus bandwidth sharing is
4948 * not an issue.
4949 *
4950 * The register has several 3 bit fields, which indicate a
4951 * multiplier applied to the base amount of prefetching the
4952 * chip would do. These fields are at:
4954 * 15:13 --- ReRead Primary Bus
4955 * 12:10 --- FirstRead Primary Bus
4956 * 09:07 --- ReRead Secondary Bus
4957 * 06:04 --- FirstRead Secondary Bus
4959 * Bits 03:00 control which REQ/GNT pairs the prefetch settings
4960 * get enabled on. Bit 3 is a grouped enabler which controls
4961 * all of the REQ/GNT pairs from [8:3]. Bits 2 to 0 control
4962 * the individual REQ/GNT pairs [2:0].
4963 */
4964 pci_write_config_word(pdev, 0x52,
4965 (0x7 << 13) |
4966 (0x7 << 10) |
4967 (0x7 << 7) |
4968 (0x7 << 4) |
4969 (0xf << 0));
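/* All four multiplier fields are maxed at 0x7 and the low nibble
 * enables prefetching on every REQ/GNT pair: bit 3 for the grouped
 * pairs [8:3], bits 2:0 for the individual pairs.
 */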
4971 /* Force cacheline size to 0x8 */
4972 pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 0x08);
4974 /* Force latency timer to maximum setting so Cassini can
4975 * sit on the bus as long as it likes.
4976 */
4977 pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0xff);
4978 }
4980 static const struct net_device_ops cas_netdev_ops = {
4981 .ndo_open = cas_open,
4982 .ndo_stop = cas_close,
4983 .ndo_start_xmit = cas_start_xmit,
4984 .ndo_get_stats = cas_get_stats,
4985 .ndo_set_multicast_list = cas_set_multicast,
4986 .ndo_do_ioctl = cas_ioctl,
4987 .ndo_tx_timeout = cas_tx_timeout,
4988 .ndo_change_mtu = cas_change_mtu,
4989 .ndo_set_mac_address = eth_mac_addr,
4990 .ndo_validate_addr = eth_validate_addr,
4991 #ifdef CONFIG_NET_POLL_CONTROLLER
4992 .ndo_poll_controller = cas_netpoll,
4993 #endif
4994 };
4996 static int __devinit cas_init_one(struct pci_dev *pdev,
4997 const struct pci_device_id *ent)
4998 {
4999 static int cas_version_printed = 0;
5000 unsigned long casreg_len;
5001 struct net_device *dev;
5002 struct cas *cp;
5003 int i, err, pci_using_dac;
5004 u16 pci_cmd;
5005 u8 orig_cacheline_size = 0, cas_cacheline_size = 0;
5007 if (cas_version_printed++ == 0)
5008 printk(KERN_INFO "%s", version);
5010 err = pci_enable_device(pdev);
5011 if (err) {
5012 dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n");
5013 return err;
5014 }
5016 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
5017 dev_err(&pdev->dev, "Cannot find proper PCI device "
5018 "base address, aborting.\n");
5020 goto err_out_disable_pdev;
5023 dev = alloc_etherdev(sizeof(*cp));
5024 if (!dev) {
5025 dev_err(&pdev->dev, "Etherdev alloc failed, aborting.\n");
5026 err = -ENOMEM;
5027 goto err_out_disable_pdev;
5028 }
5029 SET_NETDEV_DEV(dev, &pdev->dev);
5031 err = pci_request_regions(pdev, dev->name);
5032 if (err) {
5033 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
5034 goto err_out_free_netdev;
5035 }
5036 pci_set_master(pdev);
5038 /* we must always turn on parity response or else parity
5039 * doesn't get generated properly. disable SERR/PERR as well.
5040 * in addition, we want to turn MWI on.
5041 */
5042 pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
5043 pci_cmd &= ~PCI_COMMAND_SERR;
5044 pci_cmd |= PCI_COMMAND_PARITY;
5045 pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
5046 if (pci_try_set_mwi(pdev))
5047 printk(KERN_WARNING PFX "Could not enable MWI for %s\n",
5048 pci_name(pdev));
5050 cas_program_bridge(pdev);
5052 /*
5053 * On some architectures, the default cache line size set
5054 * by pci_try_set_mwi reduces performance. We have to increase
5055 * it for this case. To start, we'll print some configuration
5056 * data.
5057 */
5059 pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE,
5060 &orig_cacheline_size);
5061 if (orig_cacheline_size < CAS_PREF_CACHELINE_SIZE) {
5062 cas_cacheline_size =
5063 (CAS_PREF_CACHELINE_SIZE < SMP_CACHE_BYTES) ?
5064 CAS_PREF_CACHELINE_SIZE : SMP_CACHE_BYTES;
5065 if (pci_write_config_byte(pdev,
5066 PCI_CACHE_LINE_SIZE,
5067 cas_cacheline_size)) {
5068 dev_err(&pdev->dev, "Could not set PCI cache "
5069 "line size\n");
5070 goto err_write_cacheline;
5071 }
5072 }
5076 /* Configure DMA attributes. */
5077 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
5078 pci_using_dac = 1;
5079 err = pci_set_consistent_dma_mask(pdev,
5080 DMA_BIT_MASK(64));
5081 if (err < 0) {
5082 dev_err(&pdev->dev, "Unable to obtain 64-bit DMA "
5083 "for consistent allocations\n");
5084 goto err_out_free_res;
5085 }
5087 } else {
5088 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
5089 if (err) {
5090 dev_err(&pdev->dev, "No usable DMA configuration, "
5091 "aborting.\n");
5092 goto err_out_free_res;
5093 }
5094 pci_using_dac = 0;
5095 }
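/* pci_using_dac records whether the 64-bit DMA mask was accepted;
 * it is consulted below to decide whether to advertise
 * NETIF_F_HIGHDMA on the net device.
 */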
5097 casreg_len = pci_resource_len(pdev, 0);
5099 cp = netdev_priv(dev);
5100 cp->pdev = pdev;
5101 #if 1
5102 /* A value of 0 indicates we never explicitly set it */
5103 cp->orig_cacheline_size = cas_cacheline_size ? orig_cacheline_size : 0;
5104 #endif
5105 cp->dev = dev;
5106 cp->msg_enable = (cassini_debug < 0) ? CAS_DEF_MSG_ENABLE :
5107 cassini_debug;
5109 cp->link_transition = LINK_TRANSITION_UNKNOWN;
5110 cp->link_transition_jiffies_valid = 0;
5112 spin_lock_init(&cp->lock);
5113 spin_lock_init(&cp->rx_inuse_lock);
5114 spin_lock_init(&cp->rx_spare_lock);
5115 for (i = 0; i < N_TX_RINGS; i++) {
5116 spin_lock_init(&cp->stat_lock[i]);
5117 spin_lock_init(&cp->tx_lock[i]);
5118 }
5119 spin_lock_init(&cp->stat_lock[N_TX_RINGS]);
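/* The extra stat_lock[N_TX_RINGS] entry guards the aggregate
 * net_stats slot that cas_get_stats() collates into.
 */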
5120 mutex_init(&cp->pm_mutex);
5122 init_timer(&cp->link_timer);
5123 cp->link_timer.function = cas_link_timer;
5124 cp->link_timer.data = (unsigned long) cp;
5127 /* Just in case the implementation of atomic operations
5128 * changes so that an explicit initialization is necessary.
5129 */
5130 atomic_set(&cp->reset_task_pending, 0);
5131 atomic_set(&cp->reset_task_pending_all, 0);
5132 atomic_set(&cp->reset_task_pending_spare, 0);
5133 atomic_set(&cp->reset_task_pending_mtu, 0);
5135 INIT_WORK(&cp->reset_task, cas_reset_task);
5137 /* Default link parameters */
5138 if (link_mode >= 0 && link_mode <= 6)
5139 cp->link_cntl = link_modes[link_mode];
5140 else
5141 cp->link_cntl = BMCR_ANENABLE;
5142 cp->lstate = link_down;
5143 cp->link_transition = LINK_TRANSITION_LINK_DOWN;
5144 netif_carrier_off(cp->dev);
5145 cp->timer_ticks = 0;
5147 /* give us access to cassini registers */
5148 cp->regs = pci_iomap(pdev, 0, casreg_len);
5149 if (!cp->regs) {
5150 dev_err(&pdev->dev, "Cannot map device registers, aborting.\n");
5151 goto err_out_free_res;
5152 }
5153 cp->casreg_len = casreg_len;
5155 pci_save_state(pdev);
5156 cas_check_pci_invariants(cp);
5159 if (cas_check_invariants(cp))
5160 goto err_out_iounmap;
5161 if (cp->cas_flags & CAS_FLAG_SATURN)
5162 if (cas_saturn_firmware_init(cp))
5163 goto err_out_iounmap;
5165 cp->init_block = (struct cas_init_block *)
5166 pci_alloc_consistent(pdev, sizeof(struct cas_init_block),
5167 &cp->block_dvma);
5168 if (!cp->init_block) {
5169 dev_err(&pdev->dev, "Cannot allocate init block, aborting.\n");
5170 goto err_out_iounmap;
5171 }
5173 for (i = 0; i < N_TX_RINGS; i++)
5174 cp->init_txds[i] = cp->init_block->txds[i];
5176 for (i = 0; i < N_RX_DESC_RINGS; i++)
5177 cp->init_rxds[i] = cp->init_block->rxds[i];
5179 for (i = 0; i < N_RX_COMP_RINGS; i++)
5180 cp->init_rxcs[i] = cp->init_block->rxcs[i];
5182 for (i = 0; i < N_RX_FLOWS; i++)
5183 skb_queue_head_init(&cp->rx_flows[i]);
5185 dev->netdev_ops = &cas_netdev_ops;
5186 dev->ethtool_ops = &cas_ethtool_ops;
5187 dev->watchdog_timeo = CAS_TX_TIMEOUT;
5190 netif_napi_add(dev, &cp->napi, cas_poll, 64);
5192 dev->irq = pdev->irq;
5195 /* Cassini features. */
5196 if ((cp->cas_flags & CAS_FLAG_NO_HW_CSUM) == 0)
5197 dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
5199 if (pci_using_dac)
5200 dev->features |= NETIF_F_HIGHDMA;
5202 if (register_netdev(dev)) {
5203 dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
5204 goto err_out_free_consistent;
5205 }
5207 i = readl(cp->regs + REG_BIM_CFG);
5208 printk(KERN_INFO "%s: Sun Cassini%s (%sbit/%sMHz PCI/%s) "
5209 "Ethernet[%d] %pM\n", dev->name,
5210 (cp->cas_flags & CAS_FLAG_REG_PLUS) ? "+" : "",
5211 (i & BIM_CFG_32BIT) ? "32" : "64",
5212 (i & BIM_CFG_66MHZ) ? "66" : "33",
5213 (cp->phy_type == CAS_PHY_SERDES) ? "Fi" : "Cu", pdev->irq,
5214 dev->dev_addr);
5216 pci_set_drvdata(pdev, dev);
5217 cp->hw_running = 1;
5218 cas_entropy_reset(cp);
5219 cas_phy_init(cp);
5220 cas_begin_auto_negotiation(cp, NULL);
5221 return 0;
5223 err_out_free_consistent:
5224 pci_free_consistent(pdev, sizeof(struct cas_init_block),
5225 cp->init_block, cp->block_dvma);
5227 err_out_iounmap:
5228 mutex_lock(&cp->pm_mutex);
5229 if (cp->hw_running)
5230 cas_shutdown(cp);
5231 mutex_unlock(&cp->pm_mutex);
5233 pci_iounmap(pdev, cp->regs);
5236 err_out_free_res:
5237 pci_release_regions(pdev);
5239 err_write_cacheline:
5240 /* Try to restore it in case the error occurred after we
5241 * set it.
5242 */
5243 pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, orig_cacheline_size);
5245 err_out_free_netdev:
5246 free_netdev(dev);
5248 err_out_disable_pdev:
5249 pci_disable_device(pdev);
5250 pci_set_drvdata(pdev, NULL);
5251 return -ENODEV;
5252 }
5254 static void __devexit cas_remove_one(struct pci_dev *pdev)
5255 {
5256 struct net_device *dev = pci_get_drvdata(pdev);
5257 struct cas *cp;
5258 if (!dev)
5259 return;
5261 cp = netdev_priv(dev);
5262 unregister_netdev(dev);
5264 if (cp->fw_data)
5265 vfree(cp->fw_data);
5267 mutex_lock(&cp->pm_mutex);
5268 flush_scheduled_work();
5269 if (cp->hw_running)
5270 cas_shutdown(cp);
5271 mutex_unlock(&cp->pm_mutex);
5274 if (cp->orig_cacheline_size) {
5275 /* Restore the cache line size if we had modified
5276 * it.
5277 */
5278 pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
5279 cp->orig_cacheline_size);
5280 }
5282 pci_free_consistent(pdev, sizeof(struct cas_init_block),
5283 cp->init_block, cp->block_dvma);
5284 pci_iounmap(pdev, cp->regs);
5285 free_netdev(dev);
5286 pci_release_regions(pdev);
5287 pci_disable_device(pdev);
5288 pci_set_drvdata(pdev, NULL);
5289 }
5291 #ifdef CONFIG_PM
5292 static int cas_suspend(struct pci_dev *pdev, pm_message_t state)
5293 {
5294 struct net_device *dev = pci_get_drvdata(pdev);
5295 struct cas *cp = netdev_priv(dev);
5296 unsigned long flags;
5298 mutex_lock(&cp->pm_mutex);
5300 /* If the driver is opened, we stop the DMA */
5301 if (cp->opened) {
5302 netif_device_detach(dev);
5304 cas_lock_all_save(cp, flags);
5306 /* We can set the second arg of cas_reset to 0
5307 * because on resume, we'll call cas_init_hw with
5308 * its second arg set so that autonegotiation is
5309 * restarted, if appropriate.
5310 */
5311 cas_reset(cp, 0);
5312 cas_clean_rings(cp);
5313 cas_unlock_all_restore(cp, flags);
5314 }
5316 if (cp->hw_running)
5317 cas_shutdown(cp);
5318 mutex_unlock(&cp->pm_mutex);
5320 return 0;
5321 }
5323 static int cas_resume(struct pci_dev *pdev)
5324 {
5325 struct net_device *dev = pci_get_drvdata(pdev);
5326 struct cas *cp = netdev_priv(dev);
5328 printk(KERN_INFO "%s: resuming\n", dev->name);
5330 mutex_lock(&cp->pm_mutex);
5331 cas_hard_reset(cp);
5332 if (cp->opened) {
5333 unsigned long flags;
5334 cas_lock_all_save(cp, flags);
5335 cas_reset(cp, 0);
5336 cp->hw_running = 1;
5337 cas_clean_rings(cp);
5338 cas_init_hw(cp, 1);
5339 cas_unlock_all_restore(cp, flags);
5341 netif_device_attach(dev);
5342 }
5343 mutex_unlock(&cp->pm_mutex);
5344 return 0;
5345 }
5346 #endif /* CONFIG_PM */
5348 static struct pci_driver cas_driver = {
5349 .name = DRV_MODULE_NAME,
5350 .id_table = cas_pci_tbl,
5351 .probe = cas_init_one,
5352 .remove = __devexit_p(cas_remove_one),
5353 #ifdef CONFIG_PM
5354 .suspend = cas_suspend,
5355 .resume = cas_resume
5356 #endif
5357 };
5359 static int __init cas_init(void)
5360 {
5361 if (linkdown_timeout > 0)
5362 link_transition_timeout = linkdown_timeout * HZ;
5363 else
5364 link_transition_timeout = 0;
5366 return pci_register_driver(&cas_driver);
5367 }
5369 static void __exit cas_cleanup(void)
5370 {
5371 pci_unregister_driver(&cas_driver);
5372 }
5374 module_init(cas_init);
5375 module_exit(cas_cleanup);