Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/drzeus/mmc
drivers/net/sundance.c
index c243a80..f51ba31 100644
@@ -23,8 +23,8 @@
 */
 
 #define DRV_NAME       "sundance"
-#define DRV_VERSION    "1.1"
-#define DRV_RELDATE    "27-Jun-2006"
+#define DRV_VERSION    "1.2"
+#define DRV_RELDATE    "11-Sep-2006"
 
 
 /* The user-configurable values.
@@ -264,8 +264,6 @@ enum alta_offsets {
        ASICCtrl = 0x30,
        EEData = 0x34,
        EECtrl = 0x36,
-       TxStartThresh = 0x3c,
-       RxEarlyThresh = 0x3e,
        FlashAddr = 0x40,
        FlashData = 0x44,
        TxStatus = 0x46,
@@ -420,7 +418,7 @@ static void tx_timeout(struct net_device *dev);
 static void init_ring(struct net_device *dev);
 static int  start_tx(struct sk_buff *skb, struct net_device *dev);
 static int reset_tx (struct net_device *dev);
-static irqreturn_t intr_handler(int irq, void *dev_instance, struct pt_regs *regs);
+static irqreturn_t intr_handler(int irq, void *dev_instance);
 static void rx_poll(unsigned long data);
 static void tx_poll(unsigned long data);
 static void refill_rx (struct net_device *dev);
@@ -431,7 +429,7 @@ static int __set_mac_addr(struct net_device *dev);
 static struct net_device_stats *get_stats(struct net_device *dev);
 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
 static int  netdev_close(struct net_device *dev);
-static struct ethtool_ops ethtool_ops;
+static const struct ethtool_ops ethtool_ops;
 
 static void sundance_reset(struct net_device *dev, unsigned long reset_cmd)
 {
@@ -790,6 +788,7 @@ static int netdev_open(struct net_device *dev)
 {
        struct netdev_private *np = netdev_priv(dev);
        void __iomem *ioaddr = np->base;
+       unsigned long flags;
        int i;
 
        /* Do we need to reset the chip??? */
@@ -834,6 +833,10 @@ static int netdev_open(struct net_device *dev)
                iowrite8(0x01, ioaddr + DebugCtrl1);
        netif_start_queue(dev);
 
+       spin_lock_irqsave(&np->lock, flags);
+       reset_tx(dev);
+       spin_unlock_irqrestore(&np->lock, flags);
+
        iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);
 
        if (netif_msg_ifup(np))
@@ -907,7 +910,7 @@ static void tx_timeout(struct net_device *dev)
        struct netdev_private *np = netdev_priv(dev);
        void __iomem *ioaddr = np->base;
        unsigned long flag;
-       
+
        netif_stop_queue(dev);
        tasklet_disable(&np->tx_tasklet);
        iowrite16(0, ioaddr + IntrEnable);
@@ -924,13 +927,13 @@ static void tx_timeout(struct net_device *dev)
                                le32_to_cpu(np->tx_ring[i].next_desc),
                                le32_to_cpu(np->tx_ring[i].status),
                                (le32_to_cpu(np->tx_ring[i].status) >> 2) & 0xff,
-                               le32_to_cpu(np->tx_ring[i].frag[0].addr), 
+                               le32_to_cpu(np->tx_ring[i].frag[0].addr),
                                le32_to_cpu(np->tx_ring[i].frag[0].length));
                }
-               printk(KERN_DEBUG "TxListPtr=%08x netif_queue_stopped=%d\n", 
-                       ioread32(np->base + TxListPtr), 
+               printk(KERN_DEBUG "TxListPtr=%08x netif_queue_stopped=%d\n",
+                       ioread32(np->base + TxListPtr),
                        netif_queue_stopped(dev));
-               printk(KERN_DEBUG "cur_tx=%d(%02x) dirty_tx=%d(%02x)\n", 
+               printk(KERN_DEBUG "cur_tx=%d(%02x) dirty_tx=%d(%02x)\n",
                        np->cur_tx, np->cur_tx % TX_RING_SIZE,
                        np->dirty_tx, np->dirty_tx % TX_RING_SIZE);
                printk(KERN_DEBUG "cur_rx=%d dirty_rx=%d\n", np->cur_rx, np->dirty_rx);
@@ -1002,9 +1005,9 @@ static void tx_poll (unsigned long data)
        struct net_device *dev = (struct net_device *)data;
        struct netdev_private *np = netdev_priv(dev);
        unsigned head = np->cur_task % TX_RING_SIZE;
-       struct netdev_desc *txdesc = 
+       struct netdev_desc *txdesc =
                &np->tx_ring[(np->cur_tx - 1) % TX_RING_SIZE];
-       
+
        /* Chain the next pointer */
        for (; np->cur_tx - np->cur_task > 0; np->cur_task++) {
                int entry = np->cur_task % TX_RING_SIZE;
@@ -1074,16 +1077,18 @@ reset_tx (struct net_device *dev)
        struct sk_buff *skb;
        int i;
        int irq = in_interrupt();
-       
+
        /* Reset tx logic, TxListPtr will be cleaned */
        iowrite16 (TxDisable, ioaddr + MACCtrl1);
        sundance_reset(dev, (NetworkReset|FIFOReset|DMAReset|TxReset) << 16);
 
        /* free all tx skbuff */
        for (i = 0; i < TX_RING_SIZE; i++) {
+               np->tx_ring[i].next_desc = 0;
+
                skb = np->tx_skbuff[i];
                if (skb) {
-                       pci_unmap_single(np->pci_dev, 
+                       pci_unmap_single(np->pci_dev,
                                np->tx_ring[i].frag[0].addr, skb->len,
                                PCI_DMA_TODEVICE);
                        if (irq)
@@ -1096,13 +1101,17 @@ reset_tx (struct net_device *dev)
        }
        np->cur_tx = np->dirty_tx = 0;
        np->cur_task = 0;
+
+       np->last_tx = NULL;
+       iowrite8(127, ioaddr + TxDMAPollPeriod);
+
        iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);
        return 0;
 }
 
-/* The interrupt handler cleans up after the Tx thread, 
+/* The interrupt handler cleans up after the Tx thread,
    and schedule a Rx thread work */
-static irqreturn_t intr_handler(int irq, void *dev_instance, struct pt_regs *rgs)
+static irqreturn_t intr_handler(int irq, void *dev_instance)
 {
        struct net_device *dev = (struct net_device *)dev_instance;
        struct netdev_private *np = netdev_priv(dev);
@@ -1111,6 +1120,7 @@ static irqreturn_t intr_handler(int irq, void *dev_instance, struct pt_regs *rgs
        int tx_cnt;
        int tx_status;
        int handled = 0;
+       int i;
 
 
        do {
@@ -1153,21 +1163,24 @@ static irqreturn_t intr_handler(int irq, void *dev_instance, struct pt_regs *rgs
                                                np->stats.tx_fifo_errors++;
                                        if (tx_status & 0x02)
                                                np->stats.tx_window_errors++;
+
                                        /*
                                        ** This reset has been verified on
                                        ** DFE-580TX boards ! phdm@macqel.be.
                                        */
                                        if (tx_status & 0x10) { /* TxUnderrun */
-                                               unsigned short txthreshold;
-
-                                               txthreshold = ioread16 (ioaddr + TxStartThresh);
                                                /* Restart Tx FIFO and transmitter */
                                                sundance_reset(dev, (NetworkReset|FIFOReset|TxReset) << 16);
-                                               iowrite16 (txthreshold, ioaddr + TxStartThresh);
                                                /* No need to reset the Tx pointer here */
                                        }
-                                       /* Restart the Tx. */
-                                       iowrite16 (TxEnable, ioaddr + MACCtrl1);
+                                       /* Restart the Tx and make sure it is actually enabled */
+                                       i = 10;
+                                       do {
+                                               iowrite16(ioread16(ioaddr + MACCtrl1) | TxEnable, ioaddr + MACCtrl1);
+                                               if (ioread16(ioaddr + MACCtrl1) & TxEnabled)
+                                                       break;
+                                               mdelay(1);
+                                       } while (--i);
                                }
                                /* Yup, this is a documentation bug.  It cost me *hours*. */
                                iowrite16 (0, ioaddr + TxStatus);
@@ -1181,8 +1194,8 @@ static irqreturn_t intr_handler(int irq, void *dev_instance, struct pt_regs *rgs
                } else  {
                        hw_frame_id = ioread8(ioaddr + TxFrameId);
                }
-                       
-               if (np->pci_rev_id >= 0x14) {   
+
+               if (np->pci_rev_id >= 0x14) {
                        spin_lock(&np->lock);
                        for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
                                int entry = np->dirty_tx % TX_RING_SIZE;
@@ -1194,7 +1207,7 @@ static irqreturn_t intr_handler(int irq, void *dev_instance, struct pt_regs *rgs
                                        !(le32_to_cpu(np->tx_ring[entry].status)
                                        & 0x00010000))
                                                break;
-                               if (sw_frame_id == (hw_frame_id + 1) % 
+                               if (sw_frame_id == (hw_frame_id + 1) %
                                        TX_RING_SIZE)
                                                break;
                                skb = np->tx_skbuff[entry];
@@ -1213,7 +1226,7 @@ static irqreturn_t intr_handler(int irq, void *dev_instance, struct pt_regs *rgs
                        for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
                                int entry = np->dirty_tx % TX_RING_SIZE;
                                struct sk_buff *skb;
-                               if (!(le32_to_cpu(np->tx_ring[entry].status) 
+                               if (!(le32_to_cpu(np->tx_ring[entry].status)
                                                        & 0x00010000))
                                        break;
                                skb = np->tx_skbuff[entry];
@@ -1228,7 +1241,7 @@ static irqreturn_t intr_handler(int irq, void *dev_instance, struct pt_regs *rgs
                        }
                        spin_unlock(&np->lock);
                }
-               
+
                if (netif_queue_stopped(dev) &&
                        np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
                        /* The ring is no longer full, clear busy flag. */
@@ -1295,7 +1308,6 @@ static void rx_poll(unsigned long data)
                           to a minimally-sized skbuff. */
                        if (pkt_len < rx_copybreak
                                && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
-                               skb->dev = dev;
                                skb_reserve(skb, 2);    /* 16 byte align the IP header */
                                pci_dma_sync_single_for_cpu(np->pci_dev,
                                                            desc->frag[0].addr,
@@ -1464,8 +1476,6 @@ static void set_rx_mode(struct net_device *dev)
        int i;
 
        if (dev->flags & IFF_PROMISC) {                 /* Set promiscuous. */
-               /* Unconditionally log net taps. */
-               printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
                memset(mc_filter, 0xff, sizeof(mc_filter));
                rx_mode = AcceptBroadcast | AcceptMulticast | AcceptAll | AcceptMyPhys;
        } else if ((dev->mc_count > multicast_filter_limit)
@@ -1571,7 +1581,7 @@ static void set_msglevel(struct net_device *dev, u32 val)
        np->msg_enable = val;
 }
 
-static struct ethtool_ops ethtool_ops = {
+static const struct ethtool_ops ethtool_ops = {
        .begin = check_if_running,
        .get_drvinfo = get_drvinfo,
        .get_settings = get_settings,
@@ -1600,18 +1610,18 @@ static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
                case SIOCDEVPRIVATE:
                for (i=0; i<TX_RING_SIZE; i++) {
                        printk(KERN_DEBUG "%02x %08llx %08x %08x(%02x) %08x %08x\n", i,
-                               (unsigned long long)(np->tx_ring_dma + i*sizeof(*np->tx_ring)), 
+                               (unsigned long long)(np->tx_ring_dma + i*sizeof(*np->tx_ring)),
                                le32_to_cpu(np->tx_ring[i].next_desc),
                                le32_to_cpu(np->tx_ring[i].status),
-                               (le32_to_cpu(np->tx_ring[i].status) >> 2) 
+                               (le32_to_cpu(np->tx_ring[i].status) >> 2)
                                        & 0xff,
-                               le32_to_cpu(np->tx_ring[i].frag[0].addr), 
+                               le32_to_cpu(np->tx_ring[i].frag[0].addr),
                                le32_to_cpu(np->tx_ring[i].frag[0].length));
                }
-               printk(KERN_DEBUG "TxListPtr=%08x netif_queue_stopped=%d\n", 
-                       ioread32(np->base + TxListPtr), 
+               printk(KERN_DEBUG "TxListPtr=%08x netif_queue_stopped=%d\n",
+                       ioread32(np->base + TxListPtr),
                        netif_queue_stopped(dev));
-               printk(KERN_DEBUG "cur_tx=%d(%02x) dirty_tx=%d(%02x)\n", 
+               printk(KERN_DEBUG "cur_tx=%d(%02x) dirty_tx=%d(%02x)\n",
                        np->cur_tx, np->cur_tx % TX_RING_SIZE,
                        np->dirty_tx, np->dirty_tx % TX_RING_SIZE);
                printk(KERN_DEBUG "cur_rx=%d dirty_rx=%d\n", np->cur_rx, np->dirty_rx);
@@ -1619,7 +1629,7 @@ static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
                printk(KERN_DEBUG "TxStatus=%04x\n", ioread16(ioaddr + TxStatus));
                        return 0;
        }
-                               
+
 
        return rc;
 }
@@ -1631,6 +1641,14 @@ static int netdev_close(struct net_device *dev)
        struct sk_buff *skb;
        int i;
 
+       /* Wait and kill tasklet */
+       tasklet_kill(&np->rx_tasklet);
+       tasklet_kill(&np->tx_tasklet);
+       np->cur_tx = 0;
+       np->dirty_tx = 0;
+       np->cur_task = 0;
+       np->last_tx = NULL;
+
        netif_stop_queue(dev);
 
        if (netif_msg_ifdown(np)) {
@@ -1645,12 +1663,26 @@ static int netdev_close(struct net_device *dev)
        /* Disable interrupts by clearing the interrupt mask. */
        iowrite16(0x0000, ioaddr + IntrEnable);
 
+       /* Disable Rx and Tx DMA so resources can be released safely */
+       iowrite32(0x500, ioaddr + DMACtrl);
+
        /* Stop the chip's Tx and Rx processes. */
        iowrite16(TxDisable | RxDisable | StatsDisable, ioaddr + MACCtrl1);
 
-       /* Wait and kill tasklet */
-       tasklet_kill(&np->rx_tasklet);
-       tasklet_kill(&np->tx_tasklet);
+       for (i = 2000; i > 0; i--) {
+               if ((ioread32(ioaddr + DMACtrl) & 0xc000) == 0)
+                       break;
+               mdelay(1);
+       }
+
+       iowrite16(GlobalReset | DMAReset | FIFOReset | NetworkReset,
+                       ioaddr + ASICCtrl + 2);
+
+       for (i = 2000; i > 0; i--) {
+               if ((ioread16(ioaddr + ASICCtrl + 2) & ResetBusy) == 0)
+                       break;
+               mdelay(1);
+       }
 
 #ifdef __i386__
        if (netif_msg_hw(np)) {
@@ -1688,6 +1720,7 @@ static int netdev_close(struct net_device *dev)
                }
        }
        for (i = 0; i < TX_RING_SIZE; i++) {
+               np->tx_ring[i].next_desc = 0;
                skb = np->tx_skbuff[i];
                if (skb) {
                        pci_unmap_single(np->pci_dev,