Merge ../linux-2.6
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 702d546..652eb05 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -9,7 +9,6 @@
  * Written by: Michael Chan  (mchan@broadcom.com)
  */
 
-#include <linux/config.h>
 
 #include <linux/module.h>
 #include <linux/moduleparam.h>
@@ -57,8 +56,8 @@
 
 #define DRV_MODULE_NAME                "bnx2"
 #define PFX DRV_MODULE_NAME    ": "
-#define DRV_MODULE_VERSION     "1.4.42"
-#define DRV_MODULE_RELDATE     "June 12, 2006"
+#define DRV_MODULE_VERSION     "1.4.44"
+#define DRV_MODULE_RELDATE     "August 10, 2006"
 
 #define RUN_AT(x) (jiffies + (x))
 
@@ -210,8 +209,10 @@ MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
 
 static inline u32 bnx2_tx_avail(struct bnx2 *bp)
 {
-       u32 diff = TX_RING_IDX(bp->tx_prod) - TX_RING_IDX(bp->tx_cons);
+       u32 diff;
 
+       smp_mb();
+       diff = TX_RING_IDX(bp->tx_prod) - TX_RING_IDX(bp->tx_cons);
        if (diff > MAX_TX_DESC_CNT)
                diff = (diff & MAX_TX_DESC_CNT) - 1;
        return (bp->tx_ring_size - diff);
@@ -1570,7 +1571,7 @@ bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
        struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
        unsigned long align;
 
-       skb = dev_alloc_skb(bp->rx_buf_size);
+       skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
        if (skb == NULL) {
                return -ENOMEM;
        }
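The dev_alloc_skb() calls here, in bnx2_rx_int() and in bnx2_run_loopback() become netdev_alloc_skb(), which associates the skb with the device at allocation time; that is also why the next hunk (and the copy-break path in bnx2_rx_int() below) can drop the explicit skb->dev = bp->dev assignment. A rough sketch of the equivalence, ignoring the NET_SKB_PAD head room the real helper reserves; the _sketch name is made up:

	#include <linux/netdevice.h>
	#include <linux/skbuff.h>

	/* Sketch only, not the in-tree implementation of netdev_alloc_skb(). */
	static inline struct sk_buff *netdev_alloc_skb_sketch(struct net_device *dev,
							       unsigned int length)
	{
		struct sk_buff *skb = dev_alloc_skb(length);

		if (skb)
			skb->dev = dev;	/* netdev_alloc_skb() sets this for us */
		return skb;
	}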
@@ -1579,7 +1580,6 @@ bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
                skb_reserve(skb, 8 - align);
        }
 
-       skb->dev = bp->dev;
        mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
                PCI_DMA_FROMDEVICE);
 
@@ -1640,7 +1640,7 @@ bnx2_tx_int(struct bnx2 *bp)
                skb = tx_buf->skb;
 #ifdef BCM_TSO 
                /* partial BD completions possible with TSO packets */
-               if (skb_shinfo(skb)->tso_size) {
+               if (skb_is_gso(skb)) {
                        u16 last_idx, last_ring_idx;
 
                        last_idx = sw_cons +
@@ -1676,7 +1676,7 @@ bnx2_tx_int(struct bnx2 *bp)
 
                tx_free_bd += last + 1;
 
-               dev_kfree_skb_irq(skb);
+               dev_kfree_skb(skb);
 
                hw_cons = bp->hw_tx_cons =
                        sblk->status_tx_quick_consumer_index0;
@@ -1687,15 +1687,20 @@ bnx2_tx_int(struct bnx2 *bp)
        }
 
        bp->tx_cons = sw_cons;
+       /* Need to make the tx_cons update visible to bnx2_start_xmit()
+        * before checking for netif_queue_stopped().  Without the
+        * memory barrier, there is a small possibility that bnx2_start_xmit()
+        * will miss it and cause the queue to be stopped forever.
+        */
+       smp_mb();
 
-       if (unlikely(netif_queue_stopped(bp->dev))) {
-               spin_lock(&bp->tx_lock);
+       if (unlikely(netif_queue_stopped(bp->dev)) &&
+                    (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) {
+               netif_tx_lock(bp->dev);
                if ((netif_queue_stopped(bp->dev)) &&
-                   (bnx2_tx_avail(bp) > MAX_SKB_FRAGS)) {
-
+                   (bnx2_tx_avail(bp) > bp->tx_wake_thresh))
                        netif_wake_queue(bp->dev);
-               }
-               spin_unlock(&bp->tx_lock);
+               netif_tx_unlock(bp->dev);
        }
 }
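The comment above is the key to this hunk and to the bnx2_tx_avail()/bnx2_start_xmit() hunks: the driver now uses the classic lockless stop/wake pairing instead of bp->tx_lock. The producer stops the queue and then re-checks the ring (the smp_mb() added to bnx2_tx_avail() orders the stop against the read of tx_cons); the consumer advances tx_cons, issues smp_mb(), and then re-checks the stopped flag. With both barriers at least one side must observe the other's update, so the queue cannot stay stopped forever. A standalone userspace sketch of the same pattern, using C11 atomics and invented names (and omitting the netif_tx_lock double-check) rather than the driver's code:

	#include <stdatomic.h>
	#include <stdbool.h>

	#define TX_RING_SIZE	256u
	#define WAKE_THRESH	(TX_RING_SIZE / 2)	/* plays the role of bp->tx_wake_thresh */

	static atomic_uint tx_prod, tx_cons;
	static atomic_bool queue_stopped;

	static unsigned int tx_avail(void)
	{
		/* Mirrors the smp_mb() added to bnx2_tx_avail(). */
		atomic_thread_fence(memory_order_seq_cst);
		return TX_RING_SIZE - (atomic_load(&tx_prod) - atomic_load(&tx_cons));
	}

	/* Producer side: mirrors the tail of bnx2_start_xmit(). */
	static void xmit_one(void)
	{
		atomic_fetch_add(&tx_prod, 1);
		if (tx_avail() <= 1) {			/* "1" stands in for MAX_SKB_FRAGS */
			atomic_store(&queue_stopped, true);
			/* Re-check: the consumer may have advanced tx_cons
			 * before it could see the stop. */
			if (tx_avail() > WAKE_THRESH)
				atomic_store(&queue_stopped, false);
		}
	}

	/* Consumer side: mirrors the tail of bnx2_tx_int(). */
	static void complete_one(void)
	{
		atomic_fetch_add(&tx_cons, 1);
		atomic_thread_fence(memory_order_seq_cst);	/* the smp_mb() above */
		if (atomic_load(&queue_stopped) && tx_avail() > WAKE_THRESH)
			atomic_store(&queue_stopped, false);
	}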
 
@@ -1787,7 +1792,7 @@ bnx2_rx_int(struct bnx2 *bp, int budget)
                if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
                        struct sk_buff *new_skb;
 
-                       new_skb = dev_alloc_skb(len + 2);
+                       new_skb = netdev_alloc_skb(bp->dev, len + 2);
                        if (new_skb == NULL)
                                goto reuse_rx;
 
@@ -1798,7 +1803,6 @@ bnx2_rx_int(struct bnx2 *bp, int budget)
 
                        skb_reserve(new_skb, 2);
                        skb_put(new_skb, len);
-                       new_skb->dev = bp->dev;
 
                        bnx2_reuse_rx_skb(bp, skb,
                                sw_ring_cons, sw_ring_prod);
@@ -1824,7 +1828,7 @@ reuse_rx:
                if ((len > (bp->dev->mtu + ETH_HLEN)) &&
                        (ntohs(skb->protocol) != 0x8100)) {
 
-                       dev_kfree_skb_irq(skb);
+                       dev_kfree_skb(skb);
                        goto next_rx;
 
                }
@@ -3504,6 +3508,8 @@ bnx2_init_tx_ring(struct bnx2 *bp)
        struct tx_bd *txbd;
        u32 val;
 
+       bp->tx_wake_thresh = bp->tx_ring_size / 2;
+
        txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];
                
        txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
@@ -3643,7 +3649,7 @@ bnx2_free_tx_skbs(struct bnx2 *bp)
                                skb_shinfo(skb)->frags[j].size,
                                PCI_DMA_TODEVICE);
                }
-               dev_kfree_skb_any(skb);
+               dev_kfree_skb(skb);
                i += j + 1;
        }
 
@@ -3669,7 +3675,7 @@ bnx2_free_rx_skbs(struct bnx2 *bp)
 
                rx_buf->skb = NULL;
 
-               dev_kfree_skb_any(skb);
+               dev_kfree_skb(skb);
        }
 }
 
@@ -3953,7 +3959,7 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
                return -EINVAL;
 
        pkt_size = 1514;
-       skb = dev_alloc_skb(pkt_size);
+       skb = netdev_alloc_skb(bp->dev, pkt_size);
        if (!skb)
                return -ENOMEM;
        packet = skb_put(skb, pkt_size);
@@ -3999,7 +4005,7 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
        udelay(5);
 
        pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
-       dev_kfree_skb_irq(skb);
+       dev_kfree_skb(skb);
 
        if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_prod) {
                goto loopback_test_done;
@@ -4261,11 +4267,11 @@ bnx2_open(struct net_device *dev)
                }
                else {
                        rc = request_irq(bp->pdev->irq, bnx2_interrupt,
-                                       SA_SHIRQ, dev->name, dev);
+                                       IRQF_SHARED, dev->name, dev);
                }
        }
        else {
-               rc = request_irq(bp->pdev->irq, bnx2_interrupt, SA_SHIRQ,
+               rc = request_irq(bp->pdev->irq, bnx2_interrupt, IRQF_SHARED,
                                dev->name, dev);
        }
        if (rc) {
@@ -4312,7 +4318,7 @@ bnx2_open(struct net_device *dev)
 
                        if (!rc) {
                                rc = request_irq(bp->pdev->irq, bnx2_interrupt,
-                                       SA_SHIRQ, dev->name, dev);
+                                       IRQF_SHARED, dev->name, dev);
                        }
                        if (rc) {
                                bnx2_free_skbs(bp);
@@ -4391,10 +4397,8 @@ bnx2_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
 #endif
 
 /* Called with netif_tx_lock.
- * hard_start_xmit is pseudo-lockless - a lock is only required when
- * the tx queue is full. This way, we get the benefit of lockless
- * operations most of the time without the complexities to handle
- * netif_stop_queue/wake_queue race conditions.
+ * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
+ * netif_wake_queue().
  */
 static int
 bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
@@ -4428,7 +4432,7 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
                        (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
        }
 #ifdef BCM_TSO 
-       if ((mss = skb_shinfo(skb)->tso_size) &&
+       if ((mss = skb_shinfo(skb)->gso_size) &&
                (skb->len > (bp->dev->mtu + ETH_HLEN))) {
                u32 tcp_opt_len, ip_tcp_len;
 
@@ -4513,12 +4517,9 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
        dev->trans_start = jiffies;
 
        if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
-               spin_lock(&bp->tx_lock);
                netif_stop_queue(dev);
-               
-               if (bnx2_tx_avail(bp) > MAX_SKB_FRAGS)
+               if (bnx2_tx_avail(bp) > bp->tx_wake_thresh)
                        netif_wake_queue(dev);
-               spin_unlock(&bp->tx_lock);
        }
 
        return NETDEV_TX_OK;
@@ -4541,7 +4542,7 @@ bnx2_close(struct net_device *dev)
        bnx2_netif_stop(bp);
        del_timer_sync(&bp->timer);
        if (bp->flags & NO_WOL_FLAG)
-               reset_code = BNX2_DRV_MSG_CODE_UNLOAD;
+               reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
        else if (bp->wol)
                reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
        else
@@ -5128,6 +5129,16 @@ bnx2_set_rx_csum(struct net_device *dev, u32 data)
        return 0;
 }
 
+static int
+bnx2_set_tso(struct net_device *dev, u32 data)
+{
+       if (data)
+               dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
+       else
+               dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
+       return 0;
+}
+
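For context: the .set_tso ethtool op is what "ethtool -K <dev> tso on|off" ends up calling. The stock ethtool_op_set_tso() only toggles NETIF_F_TSO, so the driver-specific bnx2_set_tso() is presumably here so that NETIF_F_TSO_ECN, newly advertised in bnx2_init_one() further down, is set and cleared together with plain TSO.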
 #define BNX2_NUM_STATS 46
 
 static struct {
@@ -5445,7 +5456,7 @@ static struct ethtool_ops bnx2_ethtool_ops = {
        .set_sg                 = ethtool_op_set_sg,
 #ifdef BCM_TSO
        .get_tso                = ethtool_op_get_tso,
-       .set_tso                = ethtool_op_set_tso,
+       .set_tso                = bnx2_set_tso,
 #endif
        .self_test_count        = bnx2_self_test_count,
        .self_test              = bnx2_self_test,
@@ -5566,20 +5577,20 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
        /* enable device (incl. PCI PM wakeup), and bus-mastering */
        rc = pci_enable_device(pdev);
        if (rc) {
-               printk(KERN_ERR PFX "Cannot enable PCI device, aborting.");
+               dev_err(&pdev->dev, "Cannot enable PCI device, aborting.");
                goto err_out;
        }
 
        if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
-               printk(KERN_ERR PFX "Cannot find PCI device base address, "
-                      "aborting.\n");
+               dev_err(&pdev->dev,
+                       "Cannot find PCI device base address, aborting.\n");
                rc = -ENODEV;
                goto err_out_disable;
        }
 
        rc = pci_request_regions(pdev, DRV_MODULE_NAME);
        if (rc) {
-               printk(KERN_ERR PFX "Cannot obtain PCI resources, aborting.\n");
+               dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
                goto err_out_disable;
        }
 
@@ -5587,15 +5598,15 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
 
        bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
        if (bp->pm_cap == 0) {
-               printk(KERN_ERR PFX "Cannot find power management capability, "
-                              "aborting.\n");
+               dev_err(&pdev->dev,
+                       "Cannot find power management capability, aborting.\n");
                rc = -EIO;
                goto err_out_release;
        }
 
        bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
        if (bp->pcix_cap == 0) {
-               printk(KERN_ERR PFX "Cannot find PCIX capability, aborting.\n");
+               dev_err(&pdev->dev, "Cannot find PCIX capability, aborting.\n");
                rc = -EIO;
                goto err_out_release;
        }
@@ -5603,14 +5614,14 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
        if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
                bp->flags |= USING_DAC_FLAG;
                if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
-                       printk(KERN_ERR PFX "pci_set_consistent_dma_mask "
-                              "failed, aborting.\n");
+                       dev_err(&pdev->dev,
+                               "pci_set_consistent_dma_mask failed, aborting.\n");
                        rc = -EIO;
                        goto err_out_release;
                }
        }
        else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
-               printk(KERN_ERR PFX "System does not support DMA, aborting.\n");
+               dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
                rc = -EIO;
                goto err_out_release;
        }
@@ -5619,7 +5630,6 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
        bp->pdev = pdev;
 
        spin_lock_init(&bp->phy_lock);
-       spin_lock_init(&bp->tx_lock);
        INIT_WORK(&bp->reset_task, bnx2_reset_task, bp);
 
        dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
@@ -5630,7 +5640,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
        bp->regview = ioremap_nocache(dev->base_addr, mem_len);
 
        if (!bp->regview) {
-               printk(KERN_ERR PFX "Cannot map register space, aborting.\n");
+               dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
                rc = -ENOMEM;
                goto err_out_release;
        }
@@ -5702,8 +5712,8 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
        else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
                !(bp->flags & PCIX_FLAG)) {
 
-               printk(KERN_ERR PFX "5706 A1 can only be used in a PCIX bus, "
-                      "aborting.\n");
+               dev_err(&pdev->dev,
+                       "5706 A1 can only be used in a PCIX bus, aborting.\n");
                goto err_out_unmap;
        }
 
@@ -5724,7 +5734,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
 
        if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
            BNX2_DEV_INFO_SIGNATURE_MAGIC) {
-               printk(KERN_ERR PFX "Firmware not running, aborting.\n");
+               dev_err(&pdev->dev, "Firmware not running, aborting.\n");
                rc = -ENODEV;
                goto err_out_unmap;
        }
@@ -5742,7 +5752,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
        bp->mac_addr[5] = (u8) reg;
 
        bp->tx_ring_size = MAX_TX_DESC_CNT;
-       bnx2_set_rx_ring_size(bp, 100);
+       bnx2_set_rx_ring_size(bp, 255);
 
        bp->rx_csum = 1;
 
@@ -5886,7 +5896,7 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 #endif
 
        if ((rc = register_netdev(dev))) {
-               printk(KERN_ERR PFX "Cannot register net device\n");
+               dev_err(&pdev->dev, "Cannot register net device\n");
                if (bp->regview)
                        iounmap(bp->regview);
                pci_release_regions(pdev);
@@ -5926,7 +5936,7 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
 #endif
 #ifdef BCM_TSO
-       dev->features |= NETIF_F_TSO;
+       dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
 #endif
 
        netif_carrier_off(bp->dev);
@@ -5968,7 +5978,7 @@ bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
        netif_device_detach(dev);
        del_timer_sync(&bp->timer);
        if (bp->flags & NO_WOL_FLAG)
-               reset_code = BNX2_DRV_MSG_CODE_UNLOAD;
+               reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
        else if (bp->wol)
                reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
        else