Merge branch 'fixes' of git://git.linux-nfs.org/pub/linux/nfs-2.6
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 6e7d31b..726f43d 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -36,7 +36,7 @@ static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
 #else
 #define DRIVERNAPI "-NAPI"
 #endif
-#define DRV_VERSION "7.1.9-k2"DRIVERNAPI
+#define DRV_VERSION "7.1.9-k4"DRIVERNAPI
 char e1000_driver_version[] = DRV_VERSION;
 static char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
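The version bump above leans on C's adjacent-string-literal concatenation: DRIVERNAPI expands to "-NAPI" in a NAPI build (and to nothing otherwise), and the compiler joins it onto the version literal. A minimal standalone sketch, assuming the NAPI case:

#include <stdio.h>

#define DRIVERNAPI "-NAPI"
#define DRV_VERSION "7.1.9-k4"DRIVERNAPI

int main(void)
{
	printf("%s\n", DRV_VERSION);	/* prints 7.1.9-k4-NAPI */
	return 0;
}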
 
@@ -283,7 +283,7 @@ static int e1000_request_irq(struct e1000_adapter *adapter)
                }
        }
        if (adapter->have_msi)
-               flags &= ~SA_SHIRQ;
+               flags &= ~IRQF_SHARED;
 #endif
        if ((err = request_irq(adapter->pdev->irq, &e1000_intr, flags,
                               netdev->name, netdev)))
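This hunk tracks the 2.6.18 interrupt-flag rename: SA_SHIRQ became IRQF_SHARED in <linux/interrupt.h>, and since an MSI vector is never shared, the flag is cleared before request_irq(). A compatibility shim of the sort out-of-tree builds carried across the rename (an illustration, not part of this patch):

#ifndef IRQF_SHARED
#define IRQF_SHARED SA_SHIRQ	/* pre-2.6.18 kernels only know the SA_* names */
#endif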
@@ -1068,7 +1068,7 @@ e1000_sw_init(struct e1000_adapter *adapter)
 
        pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
 
-       adapter->rx_buffer_len = MAXIMUM_ETHERNET_FRAME_SIZE;
+       adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
        adapter->rx_ps_bsize0 = E1000_RXBUFFER_128;
        hw->max_frame_size = netdev->mtu +
                             ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
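Initializing rx_buffer_len to MAXIMUM_ETHERNET_VLAN_SIZE makes room for a possible 802.1Q tag from the start. The arithmetic behind the two constants, as a standalone sketch (values assumed from the driver headers, where MAXIMUM_ETHERNET_FRAME_SIZE is 1518 and MAXIMUM_ETHERNET_VLAN_SIZE is 1522):

#include <stdio.h>

#define ENET_HEADER_SIZE  14	/* dst MAC + src MAC + EtherType */
#define ETHERNET_FCS_SIZE  4	/* trailing CRC */
#define VLAN_TAG_SIZE      4	/* optional 802.1Q tag */

int main(void)
{
	int mtu = 1500;
	int frame = mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;	/* 1518 */

	printf("frame=%d vlan_frame=%d\n", frame, frame + VLAN_TAG_SIZE);
	return 0;
}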
@@ -3127,7 +3127,7 @@ e1000_change_mtu(struct net_device *netdev, int new_mtu)
                break;
        }
 
-       /* NOTE: dev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
+       /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
         * means we reserve 2 more, this pushes us to allocate from the next
         * larger slab size
         * i.e. RXBUFFER_2048 --> size-4096 slab */
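The slab spill-over the comment warns about, worked through (a sketch; the 16-byte figure is the headroom the skb allocator reserves, and NET_IP_ALIGN is typically 2):

#include <stdio.h>

int main(void)
{
	int rx_buffer = 2048;		/* nominal E1000_RXBUFFER_2048 */
	int need = rx_buffer + 16 + 2;	/* 2066 bytes actually requested */
	int slab = 32;

	while (slab < need)		/* generic slabs come in powers of two */
		slab <<= 1;
	printf("need=%d -> size-%d slab\n", need, slab);	/* size-4096 */
	return 0;
}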
@@ -3148,7 +3148,6 @@ e1000_change_mtu(struct net_device *netdev, int new_mtu)
                adapter->rx_buffer_len = E1000_RXBUFFER_16384;
 
        /* adjust allocation if LPE protects us, and we aren't using SBP */
-#define MAXIMUM_ETHERNET_VLAN_SIZE 1522
        if (!adapter->hw.tbi_compatibility_on &&
            ((max_frame == MAXIMUM_ETHERNET_FRAME_SIZE) ||
             (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE)))
@@ -3387,8 +3386,8 @@ e1000_intr(int irq, void *data, struct pt_regs *regs)
                E1000_WRITE_REG(hw, IMC, ~0);
                E1000_WRITE_FLUSH(hw);
        }
-       if (likely(netif_rx_schedule_prep(&adapter->polling_netdev[0])))
-               __netif_rx_schedule(&adapter->polling_netdev[0]);
+       if (likely(netif_rx_schedule_prep(netdev)))
+               __netif_rx_schedule(netdev);
        else
                e1000_irq_enable(adapter);
 #else
@@ -3431,34 +3430,26 @@ e1000_clean(struct net_device *poll_dev, int *budget)
 {
        struct e1000_adapter *adapter;
        int work_to_do = min(*budget, poll_dev->quota);
-       int tx_cleaned = 0, i = 0, work_done = 0;
+       int tx_cleaned = 0, work_done = 0;
 
        /* Must NOT use netdev_priv macro here. */
        adapter = poll_dev->priv;
 
        /* Keep link state information with original netdev */
-       if (!netif_carrier_ok(adapter->netdev))
+       if (!netif_carrier_ok(poll_dev))
                goto quit_polling;
 
-       while (poll_dev != &adapter->polling_netdev[i]) {
-               i++;
-               BUG_ON(i == adapter->num_rx_queues);
+       /* e1000_clean is called per-cpu.  This lock protects
+        * tx_ring[0] from being cleaned by multiple cpus
+        * simultaneously.  A failure obtaining the lock means
+        * tx_ring[0] is currently being cleaned anyway. */
+       if (spin_trylock(&adapter->tx_queue_lock)) {
+               tx_cleaned = e1000_clean_tx_irq(adapter,
+                                               &adapter->tx_ring[0]);
+               spin_unlock(&adapter->tx_queue_lock);
        }
 
-       if (likely(adapter->num_tx_queues == 1)) {
-               /* e1000_clean is called per-cpu.  This lock protects
-                * tx_ring[0] from being cleaned by multiple cpus
-                * simultaneously.  A failure obtaining the lock means
-                * tx_ring[0] is currently being cleaned anyway. */
-               if (spin_trylock(&adapter->tx_queue_lock)) {
-                       tx_cleaned = e1000_clean_tx_irq(adapter,
-                                                       &adapter->tx_ring[0]);
-                       spin_unlock(&adapter->tx_queue_lock);
-               }
-       } else
-               tx_cleaned = e1000_clean_tx_irq(adapter, &adapter->tx_ring[i]);
-
-       adapter->clean_rx(adapter, &adapter->rx_ring[i],
+       adapter->clean_rx(adapter, &adapter->rx_ring[0],
                          &work_done, work_to_do);
 
        *budget -= work_done;
@@ -3466,7 +3457,7 @@ e1000_clean(struct net_device *poll_dev, int *budget)
 
        /* If no Tx and not enough Rx work done, exit the polling mode */
        if ((!tx_cleaned && (work_done == 0)) ||
-          !netif_running(adapter->netdev)) {
+          !netif_running(poll_dev)) {
 quit_polling:
                netif_rx_complete(poll_dev);
                e1000_irq_enable(adapter);
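With the per-queue polling_netdev array gone, every CPU's poll lands on the same tx_ring[0], and the trylock keeps two CPUs from cleaning it at once; losing the race just means the work is already in progress. A userspace analogue of the pattern (a sketch, not driver code):

#include <pthread.h>
#include <stdio.h>

static pthread_spinlock_t tx_queue_lock;

static void clean_tx(void)
{
	/* Failing to take the lock means another thread is cleaning
	 * the ring right now, so skipping is safe. */
	if (pthread_spin_trylock(&tx_queue_lock) == 0) {
		printf("cleaning tx ring\n");
		pthread_spin_unlock(&tx_queue_lock);
	}
}

int main(void)
{
	pthread_spin_init(&tx_queue_lock, PTHREAD_PROCESS_PRIVATE);
	clean_tx();
	pthread_spin_destroy(&tx_queue_lock);
	return 0;
}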
@@ -3681,6 +3672,9 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
 
                length = le16_to_cpu(rx_desc->length);
 
+               /* adjust length to remove Ethernet CRC */
+               length -= 4;
+
                if (unlikely(!(status & E1000_RXD_STAT_EOP))) {
                        /* All receives must fit into a single buffer */
                        E1000_DBG("%s: Receive packet consumed multiple"
@@ -3714,7 +3708,7 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
 #define E1000_CB_LENGTH 256
                if (length < E1000_CB_LENGTH) {
                        struct sk_buff *new_skb =
-                           dev_alloc_skb(length + NET_IP_ALIGN);
+                           netdev_alloc_skb(netdev, length + NET_IP_ALIGN);
                        if (new_skb) {
                                skb_reserve(new_skb, NET_IP_ALIGN);
                                new_skb->dev = netdev;
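This is the copybreak path: receives shorter than E1000_CB_LENGTH are copied into a right-sized skb so the full-size buffer can be recycled without a fresh DMA mapping, and netdev_alloc_skb() additionally ties the new skb to the receiving device. A userspace analogue of the idea (a sketch with made-up names):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define COPYBREAK 256	/* stand-in for E1000_CB_LENGTH */

/* Copy small frames out so the large buffer stays mapped for reuse;
 * large frames keep the original buffer and recycle nothing. */
static unsigned char *rx_copybreak(unsigned char *buf, size_t len, int *recycled)
{
	unsigned char *small;

	if (len < COPYBREAK && (small = malloc(len)) != NULL) {
		memcpy(small, buf, len);
		*recycled = 1;
		return small;
	}
	*recycled = 0;
	return buf;
}

int main(void)
{
	unsigned char buf[2048] = "hello";
	int recycled;
	unsigned char *pkt = rx_copybreak(buf, 6, &recycled);

	printf("recycled=%d pkt=%s\n", recycled, pkt);
	if (pkt != buf)
		free(pkt);
	return 0;
}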
@@ -3885,8 +3879,9 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
                        pci_dma_sync_single_for_device(pdev,
                                ps_page_dma->ps_page_dma[0],
                                PAGE_SIZE, PCI_DMA_FROMDEVICE);
+                       /* remove the CRC */
+                       l1 -= 4;
                        skb_put(skb, l1);
-                       length += l1;
                        goto copydone;
                } /* if */
                }
@@ -3905,6 +3900,10 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
                        skb->truesize += length;
                }
 
+               /* strip the ethernet crc, problem is we're using pages now so
+                * this whole operation can get a little cpu intensive */
+               pskb_trim(skb, skb->len - 4);
+
 copydone:
                e1000_rx_checksum(adapter, staterr,
                                  le16_to_cpu(rx_desc->wb.lower.hi_dword.csum_ip.csum), skb);
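In the packet-split path the tail of the frame lives in attached pages rather than the linear area, so the FCS cannot be dropped with a simple length subtraction; pskb_trim() has to walk the paged data to shorten the skb, which is the CPU cost the comment concedes.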
@@ -3980,7 +3979,7 @@ e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
 
        while (cleaned_count--) {
                if (!(skb = buffer_info->skb))
-                       skb = dev_alloc_skb(bufsz);
+                       skb = netdev_alloc_skb(netdev, bufsz);
                else {
                        skb_trim(skb, 0);
                        goto map_skb;
@@ -3998,7 +3997,7 @@ e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
                        DPRINTK(RX_ERR, ERR, "skb align check failed: %u bytes "
                                             "at %p\n", bufsz, skb->data);
                        /* Try again, without freeing the previous */
-                       skb = dev_alloc_skb(bufsz);
+                       skb = netdev_alloc_skb(netdev, bufsz);
                        /* Failed allocation, critical failure */
                        if (!skb) {
                                dev_kfree_skb(oldskb);
@@ -4122,7 +4121,8 @@ e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
                                rx_desc->read.buffer_addr[j+1] = ~0;
                }
 
-               skb = dev_alloc_skb(adapter->rx_ps_bsize0 + NET_IP_ALIGN);
+               skb = netdev_alloc_skb(netdev,
+                                      adapter->rx_ps_bsize0 + NET_IP_ALIGN);
 
                if (unlikely(!skb)) {
                        adapter->alloc_rx_buff_failed++;
@@ -4386,11 +4386,13 @@ e1000_write_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t *value)
        pci_write_config_word(adapter->pdev, reg, *value);
 }
 
+#if 0
 uint32_t
 e1000_io_read(struct e1000_hw *hw, unsigned long port)
 {
        return inl(port);
 }
+#endif  /*  0  */
 
 void
 e1000_io_write(struct e1000_hw *hw, unsigned long port, uint32_t value)
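e1000_io_read() is left in place but compiled out: the preprocessor discards an #if 0 block entirely, so the unused helper costs nothing in the object file while remaining in the source for reference.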
@@ -4752,6 +4754,7 @@ static void
 e1000_netpoll(struct net_device *netdev)
 {
        struct e1000_adapter *adapter = netdev_priv(netdev);
+
        disable_irq(adapter->pdev->irq);
        e1000_intr(adapter->pdev->irq, netdev, NULL);
        e1000_clean_tx_irq(adapter, adapter->tx_ring);