Merge branch 'upstream-fixes' of git://lost.foo-projects.org/~ahkok/git/netdev-2...
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index c44ed6f..0cf9ff2 100644
@@ -36,7 +36,7 @@ static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
 #else
 #define DRIVERNAPI "-NAPI"
 #endif
-#define DRV_VERSION "7.0.38-k4"DRIVERNAPI
+#define DRV_VERSION "7.1.9-k6"DRIVERNAPI
 char e1000_driver_version[] = DRV_VERSION;
 static char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
 
@@ -48,7 +48,6 @@ static char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
  *   {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
  */
 static struct pci_device_id e1000_pci_tbl[] = {
-       INTEL_E1000_ETHERNET_DEVICE(0x1000),
        INTEL_E1000_ETHERNET_DEVICE(0x1001),
        INTEL_E1000_ETHERNET_DEVICE(0x1004),
        INTEL_E1000_ETHERNET_DEVICE(0x1008),
@@ -73,6 +72,11 @@ static struct pci_device_id e1000_pci_tbl[] = {
        INTEL_E1000_ETHERNET_DEVICE(0x1026),
        INTEL_E1000_ETHERNET_DEVICE(0x1027),
        INTEL_E1000_ETHERNET_DEVICE(0x1028),
+       INTEL_E1000_ETHERNET_DEVICE(0x1049),
+       INTEL_E1000_ETHERNET_DEVICE(0x104A),
+       INTEL_E1000_ETHERNET_DEVICE(0x104B),
+       INTEL_E1000_ETHERNET_DEVICE(0x104C),
+       INTEL_E1000_ETHERNET_DEVICE(0x104D),
        INTEL_E1000_ETHERNET_DEVICE(0x105E),
        INTEL_E1000_ETHERNET_DEVICE(0x105F),
        INTEL_E1000_ETHERNET_DEVICE(0x1060),
@@ -96,6 +100,8 @@ static struct pci_device_id e1000_pci_tbl[] = {
        INTEL_E1000_ETHERNET_DEVICE(0x109A),
        INTEL_E1000_ETHERNET_DEVICE(0x10B5),
        INTEL_E1000_ETHERNET_DEVICE(0x10B9),
+       INTEL_E1000_ETHERNET_DEVICE(0x10BA),
+       INTEL_E1000_ETHERNET_DEVICE(0x10BB),
        /* required last entry */
        {0,}
 };
@@ -238,7 +244,7 @@ e1000_init_module(void)
 
        printk(KERN_INFO "%s\n", e1000_copyright);
 
-       ret = pci_module_init(&e1000_driver);
+       ret = pci_register_driver(&e1000_driver);
 
        return ret;
 }
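
Note on the hunk above: pci_module_init() was only a thin compatibility wrapper around pci_register_driver(), so the driver now calls the real API directly. Below is a minimal, self-contained sketch of that registration pattern; the example_* names and the single device ID are placeholders, not the e1000 driver's own table or callbacks.

    #include <linux/module.h>
    #include <linux/pci.h>

    /* Placeholder ID table and callbacks; the real driver uses
     * e1000_pci_tbl, e1000_probe and e1000_remove. */
    static struct pci_device_id example_pci_tbl[] = {
            { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x100E) },
            { 0, }
    };

    static int __devinit example_probe(struct pci_dev *pdev,
                                       const struct pci_device_id *ent)
    {
            return pci_enable_device(pdev);
    }

    static void __devexit example_remove(struct pci_dev *pdev)
    {
            pci_disable_device(pdev);
    }

    static struct pci_driver example_driver = {
            .name     = "example",
            .id_table = example_pci_tbl,
            .probe    = example_probe,
            .remove   = __devexit_p(example_remove),
    };

    static int __init example_init(void)
    {
            /* pci_register_driver() returns 0 on success or a negative errno. */
            return pci_register_driver(&example_driver);
    }

    static void __exit example_exit(void)
    {
            pci_unregister_driver(&example_driver);
    }

    module_init(example_init);
    module_exit(example_exit);
    MODULE_LICENSE("GPL");
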
@@ -265,7 +271,7 @@ static int e1000_request_irq(struct e1000_adapter *adapter)
        struct net_device *netdev = adapter->netdev;
        int flags, err = 0;
 
-       flags = SA_SHIRQ | SA_SAMPLE_RANDOM;
+       flags = IRQF_SHARED;
 #ifdef CONFIG_PCI_MSI
        if (adapter->hw.mac_type > e1000_82547_rev_2) {
                adapter->have_msi = TRUE;
@@ -276,7 +282,7 @@ static int e1000_request_irq(struct e1000_adapter *adapter)
                }
        }
        if (adapter->have_msi)
-               flags &= ~SA_SHIRQ;
+               flags &= ~IRQF_SHARED;
 #endif
        if ((err = request_irq(adapter->pdev->irq, &e1000_intr, flags,
                               netdev->name, netdev)))
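
On the two request_irq hunks above: the old SA_* interrupt flags were renamed to the IRQF_* family, so SA_SHIRQ becomes IRQF_SHARED (and the SA_SAMPLE_RANDOM hint is simply dropped by this patch). When MSI is enabled the vector belongs to this device alone, which is why the shared flag is cleared again. A hedged sketch of the same request path, using placeholder names and the three-argument handler prototype of this kernel generation:

    #include <linux/interrupt.h>
    #include <linux/pci.h>

    /* Placeholder interrupt handler. */
    static irqreturn_t example_intr(int irq, void *data, struct pt_regs *regs)
    {
            return IRQ_HANDLED;
    }

    static int example_request_irq(struct pci_dev *pdev, void *dev_id,
                                   const char *name)
    {
            unsigned long flags = IRQF_SHARED;      /* was SA_SHIRQ */

    #ifdef CONFIG_PCI_MSI
            /* An MSI vector is never shared with another device, so the
             * shared flag can be dropped once MSI is up. */
            if (!pci_enable_msi(pdev))
                    flags &= ~IRQF_SHARED;
    #endif
            return request_irq(pdev->irq, example_intr, flags, name, dev_id);
    }
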
@@ -366,6 +372,7 @@ e1000_release_hw_control(struct e1000_adapter *adapter)
 {
        uint32_t ctrl_ext;
        uint32_t swsm;
+       uint32_t extcnf;
 
        /* Let firmware take over control of h/w */
        switch (adapter->hw.mac_type) {
@@ -380,6 +387,11 @@ e1000_release_hw_control(struct e1000_adapter *adapter)
                swsm = E1000_READ_REG(&adapter->hw, SWSM);
                E1000_WRITE_REG(&adapter->hw, SWSM,
                                swsm & ~E1000_SWSM_DRV_LOAD);
+       case e1000_ich8lan:
+               extcnf = E1000_READ_REG(&adapter->hw, CTRL_EXT);
+               E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
+                               extcnf & ~E1000_CTRL_EXT_DRV_LOAD);
+               break;
        default:
                break;
        }
@@ -401,6 +413,7 @@ e1000_get_hw_control(struct e1000_adapter *adapter)
 {
        uint32_t ctrl_ext;
        uint32_t swsm;
+       uint32_t extcnf;
        /* Let firmware know the driver has taken over */
        switch (adapter->hw.mac_type) {
        case e1000_82571:
@@ -415,6 +428,11 @@ e1000_get_hw_control(struct e1000_adapter *adapter)
                E1000_WRITE_REG(&adapter->hw, SWSM,
                                swsm | E1000_SWSM_DRV_LOAD);
                break;
+       case e1000_ich8lan:
+               extcnf = E1000_READ_REG(&adapter->hw, EXTCNF_CTRL);
+               E1000_WRITE_REG(&adapter->hw, EXTCNF_CTRL,
+                               extcnf | E1000_EXTCNF_CTRL_SWFLAG);
+               break;
        default:
                break;
        }
@@ -466,7 +484,7 @@ e1000_up(struct e1000_adapter *adapter)
  *
  **/
 
-static void e1000_power_up_phy(struct e1000_adapter *adapter)
+void e1000_power_up_phy(struct e1000_adapter *adapter)
 {
        uint16_t mii_reg = 0;
 
@@ -490,6 +508,7 @@ static void e1000_power_down_phy(struct e1000_adapter *adapter)
         * (b) AMT is active
         * (c) SoL/IDER session is active */
        if (!adapter->wol && adapter->hw.mac_type >= e1000_82540 &&
+           adapter->hw.mac_type != e1000_ich8lan &&
            adapter->hw.media_type == e1000_media_type_copper &&
            !(E1000_READ_REG(&adapter->hw, MANC) & E1000_MANC_SMBUS_EN) &&
            !mng_mode_enabled &&
@@ -561,6 +580,9 @@ e1000_reset(struct e1000_adapter *adapter)
        case e1000_82573:
                pba = E1000_PBA_12K;
                break;
+       case e1000_ich8lan:
+               pba = E1000_PBA_8K;
+               break;
        default:
                pba = E1000_PBA_48K;
                break;
@@ -585,6 +607,12 @@ e1000_reset(struct e1000_adapter *adapter)
        /* Set the FC high water mark to 90% of the FIFO size.
         * Required to clear last 3 LSB */
        fc_high_water_mark = ((pba * 9216)/10) & 0xFFF8;
+       /* We can't use 90% on small FIFOs because the remainder
+        * would be less than 1 full frame.  In this case, we size
+        * it to allow at least a full frame above the high water
+        * mark. */
+       if (pba < E1000_PBA_16K)
+               fc_high_water_mark = (pba * 1024) - 1600;
 
        adapter->hw.fc_high_water = fc_high_water_mark;
        adapter->hw.fc_low_water = fc_high_water_mark - 8;
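
Regarding the flow-control change above: with a 48 KB FIFO, 90% still leaves almost 5 KB of headroom above the high water mark, but on the 8 KB PBA added for ICH8 it would leave only ~820 bytes, less than one 1522-byte VLAN frame. The override therefore pins the headroom at 1600 bytes instead. A standalone illustration of the arithmetic; the assumption that the E1000_PBA_* constants are in kilobytes is taken from the driver's own "pba * 1024" usage.

    #include <stdio.h>

    /* fc_high_water as computed above; pba is in KB. */
    static unsigned int fc_high_water(unsigned int pba)
    {
            unsigned int mark = ((pba * 9216) / 10) & 0xFFF8; /* 90%, 8-byte aligned */

            if (pba < 16)                         /* E1000_PBA_16K */
                    mark = (pba * 1024) - 1600;   /* one full frame of headroom */
            return mark;
    }

    int main(void)
    {
            static const unsigned int pba[] = { 8, 12, 16, 48 };
            unsigned int i;

            for (i = 0; i < sizeof(pba) / sizeof(pba[0]); i++)
                    printf("PBA %2u KB: high water %5u, low water %5u, headroom %4u\n",
                           pba[i], fc_high_water(pba[i]),
                           fc_high_water(pba[i]) - 8,
                           pba[i] * 1024 - fc_high_water(pba[i]));
            return 0;
    }
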
@@ -622,6 +650,8 @@ e1000_reset(struct e1000_adapter *adapter)
                                    phy_data);
        }
 
+       if (adapter->hw.mac_type < e1000_ich8lan)
+       /* FIXME: this code is duplicated and wrong for PCI Express */
        if (adapter->en_mng_pt) {
                manc = E1000_READ_REG(&adapter->hw, MANC);
                manc |= (E1000_MANC_ARP_EN | E1000_MANC_EN_MNG2HOST);
@@ -648,6 +678,7 @@ e1000_probe(struct pci_dev *pdev,
        struct net_device *netdev;
        struct e1000_adapter *adapter;
        unsigned long mmio_start, mmio_len;
+       unsigned long flash_start, flash_len;
 
        static int cards_found = 0;
        static int e1000_ksp3_port_a = 0; /* global ksp3 port a indication */
@@ -657,10 +688,12 @@ e1000_probe(struct pci_dev *pdev,
        if ((err = pci_enable_device(pdev)))
                return err;
 
-       if (!(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK))) {
+       if (!(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK)) &&
+           !(err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))) {
                pci_using_dac = 1;
        } else {
-               if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK))) {
+               if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) &&
+                   (err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK))) {
                        E1000_ERR("No usable DMA configuration, aborting\n");
                        return err;
                }
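
Note on the hunk above: the probe previously set only the streaming DMA mask, so a device capable of 64-bit addressing could still have its coherent allocations (the descriptor rings) left at the default 32-bit mask. The patch now sets both masks at each width. A simplified sketch of that intent; the names are placeholders and the error handling here is deliberately stricter than the driver's, not a transcription of it.

    #include <linux/pci.h>
    #include <linux/dma-mapping.h>
    #include <linux/errno.h>

    /* Prefer 64-bit streaming + coherent masks, fall back to 32-bit. */
    static int example_set_dma_masks(struct pci_dev *pdev, int *using_dac)
    {
            if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK) &&
                !pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK)) {
                    *using_dac = 1;
                    return 0;
            }

            *using_dac = 0;
            if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) ||
                pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK))
                    return -EIO;            /* no usable DMA configuration */
            return 0;
    }
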
@@ -740,6 +773,19 @@ e1000_probe(struct pci_dev *pdev,
        if ((err = e1000_sw_init(adapter)))
                goto err_sw_init;
 
+       /* Flash BAR mapping must happen after e1000_sw_init
+        * because it depends on mac_type */
+       if ((adapter->hw.mac_type == e1000_ich8lan) &&
+          (pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
+               flash_start = pci_resource_start(pdev, 1);
+               flash_len = pci_resource_len(pdev, 1);
+               adapter->hw.flash_address = ioremap(flash_start, flash_len);
+               if (!adapter->hw.flash_address) {
+                       err = -EIO;
+                       goto err_flashmap;
+               }
+       }
+
        if ((err = e1000_check_phy_reset_block(&adapter->hw)))
                DPRINTK(PROBE, INFO, "PHY reset is blocked due to SOL/IDER session.\n");
 
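On the flash-mapping hunk above: the ICH8 flash (NVM) registers sit behind a second memory BAR, which can only be mapped once e1000_sw_init() has established mac_type. The err_flashmap label added further down and the extra iounmap in e1000_remove() undo the mapping on every exit path. A small sketch of mapping such an optional memory BAR, with placeholder names (BAR 1 is assumed here, as in the ICH8 case):

    #include <linux/pci.h>
    #include <linux/errno.h>
    #include <asm/io.h>

    /* Returns 0 on success, -EIO if the BAR exists but cannot be mapped;
     * *flash stays NULL when there is no such BAR. */
    static int example_map_flash_bar(struct pci_dev *pdev, void __iomem **flash)
    {
            *flash = NULL;
            if (!(pci_resource_flags(pdev, 1) & IORESOURCE_MEM))
                    return 0;

            *flash = ioremap(pci_resource_start(pdev, 1),
                             pci_resource_len(pdev, 1));
            return *flash ? 0 : -EIO;
    }
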
@@ -758,6 +804,8 @@ e1000_probe(struct pci_dev *pdev,
                                   NETIF_F_HW_VLAN_TX |
                                   NETIF_F_HW_VLAN_RX |
                                   NETIF_F_HW_VLAN_FILTER;
+               if (adapter->hw.mac_type == e1000_ich8lan)
+                       netdev->features &= ~NETIF_F_HW_VLAN_FILTER;
        }
 
 #ifdef NETIF_F_TSO
@@ -773,11 +821,17 @@ e1000_probe(struct pci_dev *pdev,
        if (pci_using_dac)
                netdev->features |= NETIF_F_HIGHDMA;
 
-       /* hard_start_xmit is safe against parallel locking */
        netdev->features |= NETIF_F_LLTX;
 
        adapter->en_mng_pt = e1000_enable_mng_pass_thru(&adapter->hw);
 
+       /* initialize eeprom parameters */
+
+       if (e1000_init_eeprom_params(&adapter->hw)) {
+               E1000_ERR("EEPROM initialization failed\n");
+               return -EIO;
+       }
+
        /* before reading the EEPROM, reset the controller to
         * put the device in a known good starting state */
 
@@ -845,6 +899,11 @@ e1000_probe(struct pci_dev *pdev,
                        EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data);
                eeprom_apme_mask = E1000_EEPROM_82544_APM;
                break;
+       case e1000_ich8lan:
+               e1000_read_eeprom(&adapter->hw,
+                       EEPROM_INIT_CONTROL1_REG, 1, &eeprom_data);
+               eeprom_apme_mask = E1000_EEPROM_ICH8_APME;
+               break;
        case e1000_82546:
        case e1000_82546_rev_3:
        case e1000_82571:
@@ -904,6 +963,9 @@ e1000_probe(struct pci_dev *pdev,
        return 0;
 
 err_register:
+       if (adapter->hw.flash_address)
+               iounmap(adapter->hw.flash_address);
+err_flashmap:
 err_sw_init:
 err_eeprom:
        iounmap(adapter->hw.hw_addr);
@@ -937,6 +999,7 @@ e1000_remove(struct pci_dev *pdev)
        flush_scheduled_work();
 
        if (adapter->hw.mac_type >= e1000_82540 &&
+          adapter->hw.mac_type != e1000_ich8lan &&
           adapter->hw.media_type == e1000_media_type_copper) {
                manc = E1000_READ_REG(&adapter->hw, MANC);
                if (manc & E1000_MANC_SMBUS_EN) {
@@ -965,6 +1028,8 @@ e1000_remove(struct pci_dev *pdev)
 #endif
 
        iounmap(adapter->hw.hw_addr);
+       if (adapter->hw.flash_address)
+               iounmap(adapter->hw.flash_address);
        pci_release_regions(pdev);
 
        free_netdev(netdev);
@@ -1002,7 +1067,7 @@ e1000_sw_init(struct e1000_adapter *adapter)
 
        pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
 
-       adapter->rx_buffer_len = MAXIMUM_ETHERNET_FRAME_SIZE;
+       adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
        adapter->rx_ps_bsize0 = E1000_RXBUFFER_128;
        hw->max_frame_size = netdev->mtu +
                             ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
@@ -1015,13 +1080,6 @@ e1000_sw_init(struct e1000_adapter *adapter)
                return -EIO;
        }
 
-       /* initialize eeprom parameters */
-
-       if (e1000_init_eeprom_params(hw)) {
-               E1000_ERR("EEPROM initialization failed\n");
-               return -EIO;
-       }
-
        switch (hw->mac_type) {
        default:
                break;
@@ -1257,8 +1315,7 @@ e1000_setup_tx_resources(struct e1000_adapter *adapter,
        int size;
 
        size = sizeof(struct e1000_buffer) * txdr->count;
-
-       txdr->buffer_info = vmalloc_node(size, pcibus_to_node(pdev->bus));
+       txdr->buffer_info = vmalloc(size);
        if (!txdr->buffer_info) {
                DPRINTK(PROBE, ERR,
                "Unable to allocate memory for the transmit descriptor ring\n");
@@ -1441,8 +1498,6 @@ e1000_configure_tx(struct e1000_adapter *adapter)
        } else if (hw->mac_type == e1000_80003es2lan) {
                tarc = E1000_READ_REG(hw, TARC0);
                tarc |= 1;
-               if (hw->media_type == e1000_media_type_internal_serdes)
-                       tarc |= (1 << 20);
                E1000_WRITE_REG(hw, TARC0, tarc);
                tarc = E1000_READ_REG(hw, TARC1);
                tarc |= 1;
@@ -1486,7 +1541,7 @@ e1000_setup_rx_resources(struct e1000_adapter *adapter,
        int size, desc_len;
 
        size = sizeof(struct e1000_buffer) * rxdr->count;
-       rxdr->buffer_info = vmalloc_node(size, pcibus_to_node(pdev->bus));
+       rxdr->buffer_info = vmalloc(size);
        if (!rxdr->buffer_info) {
                DPRINTK(PROBE, ERR,
                "Unable to allocate memory for the receive descriptor ring\n");
@@ -1628,9 +1683,6 @@ e1000_setup_rctl(struct e1000_adapter *adapter)
                E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
                (adapter->hw.mc_filter_type << E1000_RCTL_MO_SHIFT);
 
-       if (adapter->hw.mac_type > e1000_82543)
-               rctl |= E1000_RCTL_SECRC;
-
        if (adapter->hw.tbi_compatibility_on == 1)
                rctl |= E1000_RCTL_SBP;
        else
@@ -1696,7 +1748,7 @@ e1000_setup_rctl(struct e1000_adapter *adapter)
                rfctl |= E1000_RFCTL_IPV6_DIS;
                E1000_WRITE_REG(&adapter->hw, RFCTL, rfctl);
 
-               rctl |= E1000_RCTL_DTYP_PS | E1000_RCTL_SECRC;
+               rctl |= E1000_RCTL_DTYP_PS;
 
                psrctl |= adapter->rx_ps_bsize0 >>
                        E1000_PSRCTL_BSIZE0_SHIFT;
@@ -1809,9 +1861,6 @@ e1000_configure_rx(struct e1000_adapter *adapter)
                E1000_WRITE_REG(hw, RXCSUM, rxcsum);
        }
 
-       if (hw->mac_type == e1000_82573)
-               E1000_WRITE_REG(hw, ERT, 0x0100);
-
        /* Enable Receives */
        E1000_WRITE_REG(hw, RCTL, rctl);
 }
@@ -2151,6 +2200,12 @@ e1000_set_multi(struct net_device *netdev)
        uint32_t rctl;
        uint32_t hash_value;
        int i, rar_entries = E1000_RAR_ENTRIES;
+       int mta_reg_count = (hw->mac_type == e1000_ich8lan) ?
+                               E1000_NUM_MTA_REGISTERS_ICH8LAN :
+                               E1000_NUM_MTA_REGISTERS;
+
+       if (adapter->hw.mac_type == e1000_ich8lan)
+               rar_entries = E1000_RAR_ENTRIES_ICH8LAN;
 
        /* reserve RAR[14] for LAA over-write work-around */
        if (adapter->hw.mac_type == e1000_82571)
@@ -2197,7 +2252,7 @@ e1000_set_multi(struct net_device *netdev)
 
        /* clear the old settings from the multicast hash table */
 
-       for (i = 0; i < E1000_NUM_MTA_REGISTERS; i++) {
+       for (i = 0; i < mta_reg_count; i++) {
                E1000_WRITE_REG_ARRAY(hw, MTA, i, 0);
                E1000_WRITE_FLUSH(hw);
        }
@@ -2276,8 +2331,16 @@ e1000_watchdog(unsigned long data)
        struct net_device *netdev = adapter->netdev;
        struct e1000_tx_ring *txdr = adapter->tx_ring;
        uint32_t link, tctl;
-
-       e1000_check_for_link(&adapter->hw);
+       int32_t ret_val;
+
+       ret_val = e1000_check_for_link(&adapter->hw);
+       if ((ret_val == E1000_ERR_PHY) &&
+           (adapter->hw.phy_type == e1000_phy_igp_3) &&
+           (E1000_READ_REG(&adapter->hw, CTRL) & E1000_PHY_CTRL_GBE_DISABLE)) {
+               /* See e1000_kumeran_lock_loss_workaround() */
+               DPRINTK(LINK, INFO,
+                       "Gigabit has been disabled, downgrading speed\n");
+       }
        if (adapter->hw.mac_type == e1000_82573) {
                e1000_enable_tx_pkt_filtering(&adapter->hw);
                if (adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id)
@@ -2458,7 +2521,7 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
        uint8_t ipcss, ipcso, tucss, tucso, hdr_len;
        int err;
 
-       if (skb_shinfo(skb)->tso_size) {
+       if (skb_is_gso(skb)) {
                if (skb_header_cloned(skb)) {
                        err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
                        if (err)
@@ -2466,7 +2529,7 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
                }
 
                hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
-               mss = skb_shinfo(skb)->tso_size;
+               mss = skb_shinfo(skb)->gso_size;
                if (skb->protocol == htons(ETH_P_IP)) {
                        skb->nh.iph->tot_len = 0;
                        skb->nh.iph->check = 0;
@@ -2479,7 +2542,7 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
                        cmd_length = E1000_TXD_CMD_IP;
                        ipcse = skb->h.raw - skb->data - 1;
 #ifdef NETIF_F_TSO_IPV6
-               } else if (skb->protocol == ntohs(ETH_P_IPV6)) {
+               } else if (skb->protocol == htons(ETH_P_IPV6)) {
                        skb->nh.ipv6h->payload_len = 0;
                        skb->h.th->check =
                                ~csum_ipv6_magic(&skb->nh.ipv6h->saddr,
@@ -2583,7 +2646,7 @@ e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
                 * tso gets written back prematurely before the data is fully
                 * DMA'd to the controller */
                if (!skb->data_len && tx_ring->last_tx_tso &&
-                   !skb_shinfo(skb)->tso_size) {
+                   !skb_is_gso(skb)) {
                        tx_ring->last_tx_tso = 0;
                        size -= 4;
                }
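
Note on the TSO hunks above and below: tso_size was renamed to gso_size as part of the generic segmentation offload (GSO) rework, and skb_is_gso() is the new idiom for asking whether an skb needs segmentation offload. A hedged sketch of that test on the transmit path; the names are placeholders and this is not the driver's context-descriptor setup.

    #include <linux/skbuff.h>
    #include <linux/tcp.h>

    /* Returns 1 if the skb is a GSO/TSO frame and fills in mss/hdr_len,
     * 0 for an ordinary frame, negative errno on failure. */
    static int example_tso_check(struct sk_buff *skb, unsigned int *mss,
                                 unsigned int *hdr_len)
    {
            if (!skb_is_gso(skb))   /* i.e. skb_shinfo(skb)->gso_size == 0 */
                    return 0;

            /* TSO edits the TCP/IP headers, so they must not be shared
             * with a clone. */
            if (skb_header_cloned(skb) &&
                pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
                    return -ENOMEM;

            *mss = skb_shinfo(skb)->gso_size;
            /* Header length: start of TCP header plus its data offset. */
            *hdr_len = (skb->h.raw - skb->data) + (skb->h.th->doff << 2);
            return 1;
    }
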
@@ -2821,7 +2884,7 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
        }
 
 #ifdef NETIF_F_TSO
-       mss = skb_shinfo(skb)->tso_size;
+       mss = skb_shinfo(skb)->gso_size;
        /* The controller does a simple calculation to
         * make sure there is enough room in the FIFO before
         * initiating the DMA for each buffer.  The calc is:
@@ -2843,6 +2906,7 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
                        case e1000_82571:
                        case e1000_82572:
                        case e1000_82573:
+                       case e1000_ich8lan:
                                pull_size = min((unsigned int)4, skb->data_len);
                                if (!__pskb_pull_tail(skb, pull_size)) {
                                        DPRINTK(DRV, ERR,
@@ -2870,8 +2934,7 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 
 #ifdef NETIF_F_TSO
        /* Controller Erratum workaround */
-       if (!skb->data_len && tx_ring->last_tx_tso &&
-           !skb_shinfo(skb)->tso_size)
+       if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb))
                count++;
 #endif
 
@@ -3027,6 +3090,7 @@ e1000_change_mtu(struct net_device *netdev, int new_mtu)
        /* Adapter-specific max frame size limits. */
        switch (adapter->hw.mac_type) {
        case e1000_undefined ... e1000_82542_rev2_1:
+       case e1000_ich8lan:
                if (max_frame > MAXIMUM_ETHERNET_FRAME_SIZE) {
                        DPRINTK(PROBE, ERR, "Jumbo Frames not supported.\n");
                        return -EINVAL;
@@ -3060,7 +3124,7 @@ e1000_change_mtu(struct net_device *netdev, int new_mtu)
                break;
        }
 
-       /* NOTE: dev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
+       /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
         * means we reserve 2 more, this pushes us to allocate from the next
         * larger slab size
         * i.e. RXBUFFER_2048 --> size-4096 slab */
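
On the comment above: *_alloc_skb() pads every request by 16 bytes for the skb head and the usual NET_IP_ALIGN of 2 shifts the data further, so a 2048-byte receive buffer asks the allocator for at least 2066 bytes of linear data and lands in the 4096-byte kmalloc cache (the skb_shared_info tail, ignored here, only pushes the total further up). A standalone illustration of that rounding:

    #include <stdio.h>

    /* Round a request up to the next power-of-two kmalloc cache, as the
     * note above describes for RXBUFFER_2048. */
    static unsigned int kmalloc_cache_for(unsigned int bytes)
    {
            unsigned int size = 32;

            while (size < bytes)
                    size <<= 1;
            return size;
    }

    int main(void)
    {
            unsigned int buf = 2048;            /* E1000_RXBUFFER_2048 */
            unsigned int need = buf + 16 + 2;   /* skb pad + NET_IP_ALIGN */

            printf("%u-byte buffer -> %u-byte request -> %u-byte slab\n",
                   buf, need, kmalloc_cache_for(need));
            return 0;
    }
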
@@ -3081,7 +3145,6 @@ e1000_change_mtu(struct net_device *netdev, int new_mtu)
                adapter->rx_buffer_len = E1000_RXBUFFER_16384;
 
        /* adjust allocation if LPE protects us, and we aren't using SBP */
-#define MAXIMUM_ETHERNET_VLAN_SIZE 1522
        if (!adapter->hw.tbi_compatibility_on &&
            ((max_frame == MAXIMUM_ETHERNET_FRAME_SIZE) ||
             (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE)))
@@ -3135,12 +3198,15 @@ e1000_update_stats(struct e1000_adapter *adapter)
        adapter->stats.bprc += E1000_READ_REG(hw, BPRC);
        adapter->stats.mprc += E1000_READ_REG(hw, MPRC);
        adapter->stats.roc += E1000_READ_REG(hw, ROC);
+
+       if (adapter->hw.mac_type != e1000_ich8lan) {
        adapter->stats.prc64 += E1000_READ_REG(hw, PRC64);
        adapter->stats.prc127 += E1000_READ_REG(hw, PRC127);
        adapter->stats.prc255 += E1000_READ_REG(hw, PRC255);
        adapter->stats.prc511 += E1000_READ_REG(hw, PRC511);
        adapter->stats.prc1023 += E1000_READ_REG(hw, PRC1023);
        adapter->stats.prc1522 += E1000_READ_REG(hw, PRC1522);
+       }
 
        adapter->stats.symerrs += E1000_READ_REG(hw, SYMERRS);
        adapter->stats.mpc += E1000_READ_REG(hw, MPC);
@@ -3168,12 +3234,16 @@ e1000_update_stats(struct e1000_adapter *adapter)
        adapter->stats.totl += E1000_READ_REG(hw, TOTL);
        adapter->stats.toth += E1000_READ_REG(hw, TOTH);
        adapter->stats.tpr += E1000_READ_REG(hw, TPR);
+
+       if (adapter->hw.mac_type != e1000_ich8lan) {
        adapter->stats.ptc64 += E1000_READ_REG(hw, PTC64);
        adapter->stats.ptc127 += E1000_READ_REG(hw, PTC127);
        adapter->stats.ptc255 += E1000_READ_REG(hw, PTC255);
        adapter->stats.ptc511 += E1000_READ_REG(hw, PTC511);
        adapter->stats.ptc1023 += E1000_READ_REG(hw, PTC1023);
        adapter->stats.ptc1522 += E1000_READ_REG(hw, PTC1522);
+       }
+
        adapter->stats.mptc += E1000_READ_REG(hw, MPTC);
        adapter->stats.bptc += E1000_READ_REG(hw, BPTC);
 
@@ -3195,6 +3265,8 @@ e1000_update_stats(struct e1000_adapter *adapter)
        if (hw->mac_type > e1000_82547_rev_2) {
                adapter->stats.iac += E1000_READ_REG(hw, IAC);
                adapter->stats.icrxoc += E1000_READ_REG(hw, ICRXOC);
+
+               if (adapter->hw.mac_type != e1000_ich8lan) {
                adapter->stats.icrxptc += E1000_READ_REG(hw, ICRXPTC);
                adapter->stats.icrxatc += E1000_READ_REG(hw, ICRXATC);
                adapter->stats.ictxptc += E1000_READ_REG(hw, ICTXPTC);
@@ -3202,6 +3274,7 @@ e1000_update_stats(struct e1000_adapter *adapter)
                adapter->stats.ictxqec += E1000_READ_REG(hw, ICTXQEC);
                adapter->stats.ictxqmtc += E1000_READ_REG(hw, ICTXQMTC);
                adapter->stats.icrxdmtc += E1000_READ_REG(hw, ICRXDMTC);
+               }
        }
 
        /* Fill out the OS statistics structure */
@@ -3310,8 +3383,8 @@ e1000_intr(int irq, void *data, struct pt_regs *regs)
                E1000_WRITE_REG(hw, IMC, ~0);
                E1000_WRITE_FLUSH(hw);
        }
-       if (likely(netif_rx_schedule_prep(&adapter->polling_netdev[0])))
-               __netif_rx_schedule(&adapter->polling_netdev[0]);
+       if (likely(netif_rx_schedule_prep(netdev)))
+               __netif_rx_schedule(netdev);
        else
                e1000_irq_enable(adapter);
 #else
@@ -3354,34 +3427,26 @@ e1000_clean(struct net_device *poll_dev, int *budget)
 {
        struct e1000_adapter *adapter;
        int work_to_do = min(*budget, poll_dev->quota);
-       int tx_cleaned = 0, i = 0, work_done = 0;
+       int tx_cleaned = 0, work_done = 0;
 
        /* Must NOT use netdev_priv macro here. */
        adapter = poll_dev->priv;
 
        /* Keep link state information with original netdev */
-       if (!netif_carrier_ok(adapter->netdev))
+       if (!netif_carrier_ok(poll_dev))
                goto quit_polling;
 
-       while (poll_dev != &adapter->polling_netdev[i]) {
-               i++;
-               BUG_ON(i == adapter->num_rx_queues);
+       /* e1000_clean is called per-cpu.  This lock protects
+        * tx_ring[0] from being cleaned by multiple cpus
+        * simultaneously.  A failure obtaining the lock means
+        * tx_ring[0] is currently being cleaned anyway. */
+       if (spin_trylock(&adapter->tx_queue_lock)) {
+               tx_cleaned = e1000_clean_tx_irq(adapter,
+                                               &adapter->tx_ring[0]);
+               spin_unlock(&adapter->tx_queue_lock);
        }
 
-       if (likely(adapter->num_tx_queues == 1)) {
-               /* e1000_clean is called per-cpu.  This lock protects
-                * tx_ring[0] from being cleaned by multiple cpus
-                * simultaneously.  A failure obtaining the lock means
-                * tx_ring[0] is currently being cleaned anyway. */
-               if (spin_trylock(&adapter->tx_queue_lock)) {
-                       tx_cleaned = e1000_clean_tx_irq(adapter,
-                                                       &adapter->tx_ring[0]);
-                       spin_unlock(&adapter->tx_queue_lock);
-               }
-       } else
-               tx_cleaned = e1000_clean_tx_irq(adapter, &adapter->tx_ring[i]);
-
-       adapter->clean_rx(adapter, &adapter->rx_ring[i],
+       adapter->clean_rx(adapter, &adapter->rx_ring[0],
                          &work_done, work_to_do);
 
        *budget -= work_done;
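
Note on the hunk above: the extra polling_netdev[] indirection is dropped, so e1000_clean() always services tx_ring[0] and rx_ring[0] of the real netdev. Because the poll routine can run on several CPUs for the same adapter, the Tx clean is guarded by a trylock: losing the race just means another CPU is already doing that work. A sketch of the idiom with placeholder types:

    #include <linux/spinlock.h>

    struct example_napi_ctx {
            spinlock_t tx_queue_lock;
            int (*clean_tx)(struct example_napi_ctx *ctx);
    };

    static int example_poll_tx(struct example_napi_ctx *ctx)
    {
            int tx_cleaned = 0;

            /* A failed trylock means another CPU holds the lock and is
             * cleaning the ring right now, so skipping is safe. */
            if (spin_trylock(&ctx->tx_queue_lock)) {
                    tx_cleaned = ctx->clean_tx(ctx);
                    spin_unlock(&ctx->tx_queue_lock);
            }
            return tx_cleaned;
    }
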
@@ -3389,7 +3454,7 @@ e1000_clean(struct net_device *poll_dev, int *budget)
 
        /* If no Tx and not enough Rx work done, exit the polling mode */
        if ((!tx_cleaned && (work_done == 0)) ||
-          !netif_running(adapter->netdev)) {
+          !netif_running(poll_dev)) {
 quit_polling:
                netif_rx_complete(poll_dev);
                e1000_irq_enable(adapter);
@@ -3604,6 +3669,9 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
 
                length = le16_to_cpu(rx_desc->length);
 
+               /* adjust length to remove Ethernet CRC */
+               length -= 4;
+
                if (unlikely(!(status & E1000_RXD_STAT_EOP))) {
                        /* All receives must fit into a single buffer */
                        E1000_DBG("%s: Receive packet consumed multiple"
@@ -3637,7 +3705,7 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
 #define E1000_CB_LENGTH 256
                if (length < E1000_CB_LENGTH) {
                        struct sk_buff *new_skb =
-                           dev_alloc_skb(length + NET_IP_ALIGN);
+                           netdev_alloc_skb(netdev, length + NET_IP_ALIGN);
                        if (new_skb) {
                                skb_reserve(new_skb, NET_IP_ALIGN);
                                new_skb->dev = netdev;
@@ -3808,8 +3876,9 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
                        pci_dma_sync_single_for_device(pdev,
                                ps_page_dma->ps_page_dma[0],
                                PAGE_SIZE, PCI_DMA_FROMDEVICE);
+                       /* remove the CRC */
+                       l1 -= 4;
                        skb_put(skb, l1);
-                       length += l1;
                        goto copydone;
                } /* if */
                }
@@ -3828,6 +3897,10 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
                        skb->truesize += length;
                }
 
+               /* strip the Ethernet CRC; since we're using pages now,
+                * this whole operation can get a little CPU-intensive */
+               pskb_trim(skb, skb->len - 4);
+
 copydone:
                e1000_rx_checksum(adapter, staterr,
                                  le16_to_cpu(rx_desc->wb.lower.hi_dword.csum_ip.csum), skb);
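
On the receive hunks above: this patch also stops setting RCTL.SECRC (see the e1000_setup_rctl hunks earlier), so frames now arrive with the 4-byte FCS attached and the driver strips it in software: the legacy path shortens length, the copybreak path shortens l1, and the packet-split path trims the assembled skb. A hedged sketch of the software strip, assuming skb->len still includes the CRC:

    #include <linux/skbuff.h>
    #include <linux/if_ether.h>
    #include <linux/errno.h>

    /* Drop the trailing FCS from a received frame.  For linear buffers a
     * plain subtraction before skb_put() is enough; for paged (packet
     * split) buffers pskb_trim() walks the fragments. */
    static int example_strip_fcs(struct sk_buff *skb)
    {
            if (skb->len <= ETH_FCS_LEN)
                    return -EINVAL;         /* runt frame, let the caller drop it */
            return pskb_trim(skb, skb->len - ETH_FCS_LEN);
    }
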
@@ -3903,7 +3976,7 @@ e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
 
        while (cleaned_count--) {
                if (!(skb = buffer_info->skb))
-                       skb = dev_alloc_skb(bufsz);
+                       skb = netdev_alloc_skb(netdev, bufsz);
                else {
                        skb_trim(skb, 0);
                        goto map_skb;
@@ -3921,7 +3994,7 @@ e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
                        DPRINTK(RX_ERR, ERR, "skb align check failed: %u bytes "
                                             "at %p\n", bufsz, skb->data);
                        /* Try again, without freeing the previous */
-                       skb = dev_alloc_skb(bufsz);
+                       skb = netdev_alloc_skb(netdev, bufsz);
                        /* Failed allocation, critical failure */
                        if (!skb) {
                                dev_kfree_skb(oldskb);
@@ -4045,7 +4118,8 @@ e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
                                rx_desc->read.buffer_addr[j+1] = ~0;
                }
 
-               skb = dev_alloc_skb(adapter->rx_ps_bsize0 + NET_IP_ALIGN);
+               skb = netdev_alloc_skb(netdev,
+                                      adapter->rx_ps_bsize0 + NET_IP_ALIGN);
 
                if (unlikely(!skb)) {
                        adapter->alloc_rx_buff_failed++;
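
Note on the allocation hunks above: dev_alloc_skb() is replaced by netdev_alloc_skb(), which takes the receiving net_device so the buffer is tied to that device (it sets skb->dev for us); the size and NET_IP_ALIGN handling are unchanged. A sketch of the idiom with placeholder names:

    #include <linux/netdevice.h>
    #include <linux/skbuff.h>

    /* Allocate a receive buffer for 'netdev' and shift the data pointer
     * so the IP header after the 14-byte Ethernet header is aligned. */
    static struct sk_buff *example_alloc_rx_skb(struct net_device *netdev,
                                                unsigned int bufsz)
    {
            struct sk_buff *skb = netdev_alloc_skb(netdev, bufsz + NET_IP_ALIGN);

            if (skb)
                    skb_reserve(skb, NET_IP_ALIGN);
            return skb;
    }
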
@@ -4309,11 +4383,13 @@ e1000_write_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t *value)
        pci_write_config_word(adapter->pdev, reg, *value);
 }
 
+#if 0
 uint32_t
 e1000_io_read(struct e1000_hw *hw, unsigned long port)
 {
        return inl(port);
 }
+#endif  /*  0  */
 
 void
 e1000_io_write(struct e1000_hw *hw, unsigned long port, uint32_t value)
@@ -4336,18 +4412,21 @@ e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
                ctrl |= E1000_CTRL_VME;
                E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
 
+               if (adapter->hw.mac_type != e1000_ich8lan) {
                /* enable VLAN receive filtering */
                rctl = E1000_READ_REG(&adapter->hw, RCTL);
                rctl |= E1000_RCTL_VFE;
                rctl &= ~E1000_RCTL_CFIEN;
                E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
                e1000_update_mng_vlan(adapter);
+               }
        } else {
                /* disable VLAN tag insert/strip */
                ctrl = E1000_READ_REG(&adapter->hw, CTRL);
                ctrl &= ~E1000_CTRL_VME;
                E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
 
+               if (adapter->hw.mac_type != e1000_ich8lan) {
                /* disable VLAN filtering */
                rctl = E1000_READ_REG(&adapter->hw, RCTL);
                rctl &= ~E1000_RCTL_VFE;
@@ -4356,6 +4435,7 @@ e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
                        e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
                        adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
                }
+               }
        }
 
        e1000_irq_enable(adapter);
@@ -4584,7 +4664,9 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state)
                pci_enable_wake(pdev, PCI_D3cold, 0);
        }
 
+       /* FIXME: this code is incorrect for PCI Express */
        if (adapter->hw.mac_type >= e1000_82540 &&
+          adapter->hw.mac_type != e1000_ich8lan &&
           adapter->hw.media_type == e1000_media_type_copper) {
                manc = E1000_READ_REG(&adapter->hw, MANC);
                if (manc & E1000_MANC_SMBUS_EN) {
@@ -4595,6 +4677,9 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state)
                }
        }
 
+       if (adapter->hw.phy_type == e1000_phy_igp_3)
+               e1000_phy_powerdown_workaround(&adapter->hw);
+
        /* Release control of h/w to f/w.  If f/w is AMT enabled, this
         * would have already happened in close and is redundant. */
        e1000_release_hw_control(adapter);
@@ -4630,7 +4715,9 @@ e1000_resume(struct pci_dev *pdev)
 
        netif_device_attach(netdev);
 
+       /* FIXME: this code is incorrect for PCI Express */
        if (adapter->hw.mac_type >= e1000_82540 &&
+          adapter->hw.mac_type != e1000_ich8lan &&
           adapter->hw.media_type == e1000_media_type_copper) {
                manc = E1000_READ_REG(&adapter->hw, MANC);
                manc &= ~(E1000_MANC_ARP_EN);
@@ -4664,6 +4751,7 @@ static void
 e1000_netpoll(struct net_device *netdev)
 {
        struct e1000_adapter *adapter = netdev_priv(netdev);
+
        disable_irq(adapter->pdev->irq);
        e1000_intr(adapter->pdev->irq, netdev, NULL);
        e1000_clean_tx_irq(adapter, adapter->tx_ring);