X-Git-Url: https://git.openpandora.org/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=drivers%2Fnet%2Fe1000%2Fe1000_main.c;h=0cf9ff2462ba7757d677a30fa7e0562aaeed7ed9;hb=699a71238856b19091503c671bac8abb1e3f9a3a;hp=f77624f5f17bf33b0b23c3e2b6d31a25c795f0b2;hpb=6fa0cb1141da80eed4f86155fb51931bc1c31888;p=pandora-kernel.git diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c index f77624f5f17b..0cf9ff2462ba 100644 --- a/drivers/net/e1000/e1000_main.c +++ b/drivers/net/e1000/e1000_main.c @@ -36,7 +36,7 @@ static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver"; #else #define DRIVERNAPI "-NAPI" #endif -#define DRV_VERSION "7.0.38-k4"DRIVERNAPI +#define DRV_VERSION "7.1.9-k6"DRIVERNAPI char e1000_driver_version[] = DRV_VERSION; static char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation."; @@ -48,7 +48,6 @@ static char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation."; * {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)} */ static struct pci_device_id e1000_pci_tbl[] = { - INTEL_E1000_ETHERNET_DEVICE(0x1000), INTEL_E1000_ETHERNET_DEVICE(0x1001), INTEL_E1000_ETHERNET_DEVICE(0x1004), INTEL_E1000_ETHERNET_DEVICE(0x1008), @@ -73,6 +72,11 @@ static struct pci_device_id e1000_pci_tbl[] = { INTEL_E1000_ETHERNET_DEVICE(0x1026), INTEL_E1000_ETHERNET_DEVICE(0x1027), INTEL_E1000_ETHERNET_DEVICE(0x1028), + INTEL_E1000_ETHERNET_DEVICE(0x1049), + INTEL_E1000_ETHERNET_DEVICE(0x104A), + INTEL_E1000_ETHERNET_DEVICE(0x104B), + INTEL_E1000_ETHERNET_DEVICE(0x104C), + INTEL_E1000_ETHERNET_DEVICE(0x104D), INTEL_E1000_ETHERNET_DEVICE(0x105E), INTEL_E1000_ETHERNET_DEVICE(0x105F), INTEL_E1000_ETHERNET_DEVICE(0x1060), @@ -96,6 +100,8 @@ static struct pci_device_id e1000_pci_tbl[] = { INTEL_E1000_ETHERNET_DEVICE(0x109A), INTEL_E1000_ETHERNET_DEVICE(0x10B5), INTEL_E1000_ETHERNET_DEVICE(0x10B9), + INTEL_E1000_ETHERNET_DEVICE(0x10BA), + INTEL_E1000_ETHERNET_DEVICE(0x10BB), /* required last entry */ {0,} }; @@ -133,7 +139,6 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter, static void e1000_set_multi(struct net_device *netdev); static void e1000_update_phy_info(unsigned long data); static void e1000_watchdog(unsigned long data); -static void e1000_watchdog_task(struct e1000_adapter *adapter); static void e1000_82547_tx_fifo_stall(unsigned long data); static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev); static struct net_device_stats * e1000_get_stats(struct net_device *netdev); @@ -178,8 +183,8 @@ static void e1000_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid); static void e1000_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid); static void e1000_restore_vlan(struct e1000_adapter *adapter); -#ifdef CONFIG_PM static int e1000_suspend(struct pci_dev *pdev, pm_message_t state); +#ifdef CONFIG_PM static int e1000_resume(struct pci_dev *pdev); #endif static void e1000_shutdown(struct pci_dev *pdev); @@ -206,8 +211,8 @@ static struct pci_driver e1000_driver = { .probe = e1000_probe, .remove = __devexit_p(e1000_remove), /* Power Managment Hooks */ -#ifdef CONFIG_PM .suspend = e1000_suspend, +#ifdef CONFIG_PM .resume = e1000_resume, #endif .shutdown = e1000_shutdown, @@ -239,7 +244,7 @@ e1000_init_module(void) printk(KERN_INFO "%s\n", e1000_copyright); - ret = pci_module_init(&e1000_driver); + ret = pci_register_driver(&e1000_driver); return ret; } @@ -261,6 +266,44 @@ e1000_exit_module(void) module_exit(e1000_exit_module); +static int e1000_request_irq(struct e1000_adapter *adapter) +{ + struct net_device *netdev = 
adapter->netdev; + int flags, err = 0; + + flags = IRQF_SHARED; +#ifdef CONFIG_PCI_MSI + if (adapter->hw.mac_type > e1000_82547_rev_2) { + adapter->have_msi = TRUE; + if ((err = pci_enable_msi(adapter->pdev))) { + DPRINTK(PROBE, ERR, + "Unable to allocate MSI interrupt Error: %d\n", err); + adapter->have_msi = FALSE; + } + } + if (adapter->have_msi) + flags &= ~IRQF_SHARED; +#endif + if ((err = request_irq(adapter->pdev->irq, &e1000_intr, flags, + netdev->name, netdev))) + DPRINTK(PROBE, ERR, + "Unable to allocate interrupt Error: %d\n", err); + + return err; +} + +static void e1000_free_irq(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + free_irq(adapter->pdev->irq, netdev); + +#ifdef CONFIG_PCI_MSI + if (adapter->have_msi) + pci_disable_msi(adapter->pdev); +#endif +} + /** * e1000_irq_disable - Mask off interrupt generation on the NIC * @adapter: board private structure @@ -329,6 +372,7 @@ e1000_release_hw_control(struct e1000_adapter *adapter) { uint32_t ctrl_ext; uint32_t swsm; + uint32_t extcnf; /* Let firmware taken over control of h/w */ switch (adapter->hw.mac_type) { @@ -343,6 +387,11 @@ e1000_release_hw_control(struct e1000_adapter *adapter) swsm = E1000_READ_REG(&adapter->hw, SWSM); E1000_WRITE_REG(&adapter->hw, SWSM, swsm & ~E1000_SWSM_DRV_LOAD); + case e1000_ich8lan: + extcnf = E1000_READ_REG(&adapter->hw, CTRL_EXT); + E1000_WRITE_REG(&adapter->hw, CTRL_EXT, + extcnf & ~E1000_CTRL_EXT_DRV_LOAD); + break; default: break; } @@ -364,6 +413,7 @@ e1000_get_hw_control(struct e1000_adapter *adapter) { uint32_t ctrl_ext; uint32_t swsm; + uint32_t extcnf; /* Let firmware know the driver has taken over */ switch (adapter->hw.mac_type) { case e1000_82571: @@ -378,6 +428,11 @@ e1000_get_hw_control(struct e1000_adapter *adapter) E1000_WRITE_REG(&adapter->hw, SWSM, swsm | E1000_SWSM_DRV_LOAD); break; + case e1000_ich8lan: + extcnf = E1000_READ_REG(&adapter->hw, EXTCNF_CTRL); + E1000_WRITE_REG(&adapter->hw, EXTCNF_CTRL, + extcnf | E1000_EXTCNF_CTRL_SWFLAG); + break; default: break; } @@ -387,18 +442,10 @@ int e1000_up(struct e1000_adapter *adapter) { struct net_device *netdev = adapter->netdev; - int i, err; + int i; /* hardware has been reset, we need to reload some things */ - /* Reset the PHY if it was previously powered down */ - if (adapter->hw.media_type == e1000_media_type_copper) { - uint16_t mii_reg; - e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg); - if (mii_reg & MII_CR_POWER_DOWN) - e1000_phy_hw_reset(&adapter->hw); - } - e1000_set_multi(netdev); e1000_restore_vlan(adapter); @@ -415,24 +462,6 @@ e1000_up(struct e1000_adapter *adapter) E1000_DESC_UNUSED(ring)); } -#ifdef CONFIG_PCI_MSI - if (adapter->hw.mac_type > e1000_82547_rev_2) { - adapter->have_msi = TRUE; - if ((err = pci_enable_msi(adapter->pdev))) { - DPRINTK(PROBE, ERR, - "Unable to allocate MSI interrupt Error: %d\n", err); - adapter->have_msi = FALSE; - } - } -#endif - if ((err = request_irq(adapter->pdev->irq, &e1000_intr, - IRQF_SHARED | IRQF_SAMPLE_RANDOM, - netdev->name, netdev))) { - DPRINTK(PROBE, ERR, - "Unable to allocate interrupt Error: %d\n", err); - return err; - } - adapter->tx_queue_len = netdev->tx_queue_len; mod_timer(&adapter->watchdog_timer, jiffies); @@ -445,21 +474,60 @@ e1000_up(struct e1000_adapter *adapter) return 0; } +/** + * e1000_power_up_phy - restore link in case the phy was powered down + * @adapter: address of board private structure + * + * The phy may be powered down to save power and turn off link when the + * driver is unloaded and wake on 
lan is not enabled (among others) + * *** this routine MUST be followed by a call to e1000_reset *** + * + **/ + +void e1000_power_up_phy(struct e1000_adapter *adapter) +{ + uint16_t mii_reg = 0; + + /* Just clear the power down bit to wake the phy back up */ + if (adapter->hw.media_type == e1000_media_type_copper) { + /* according to the manual, the phy will retain its + * settings across a power-down/up cycle */ + e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg); + mii_reg &= ~MII_CR_POWER_DOWN; + e1000_write_phy_reg(&adapter->hw, PHY_CTRL, mii_reg); + } +} + +static void e1000_power_down_phy(struct e1000_adapter *adapter) +{ + boolean_t mng_mode_enabled = (adapter->hw.mac_type >= e1000_82571) && + e1000_check_mng_mode(&adapter->hw); + /* Power down the PHY so no link is implied when interface is down + * The PHY cannot be powered down if any of the following is TRUE + * (a) WoL is enabled + * (b) AMT is active + * (c) SoL/IDER session is active */ + if (!adapter->wol && adapter->hw.mac_type >= e1000_82540 && + adapter->hw.mac_type != e1000_ich8lan && + adapter->hw.media_type == e1000_media_type_copper && + !(E1000_READ_REG(&adapter->hw, MANC) & E1000_MANC_SMBUS_EN) && + !mng_mode_enabled && + !e1000_check_phy_reset_block(&adapter->hw)) { + uint16_t mii_reg = 0; + e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg); + mii_reg |= MII_CR_POWER_DOWN; + e1000_write_phy_reg(&adapter->hw, PHY_CTRL, mii_reg); + mdelay(1); + } +} + void e1000_down(struct e1000_adapter *adapter) { struct net_device *netdev = adapter->netdev; - boolean_t mng_mode_enabled = (adapter->hw.mac_type >= e1000_82571) && - e1000_check_mng_mode(&adapter->hw); e1000_irq_disable(adapter); - free_irq(adapter->pdev->irq, netdev); -#ifdef CONFIG_PCI_MSI - if (adapter->hw.mac_type > e1000_82547_rev_2 && - adapter->have_msi == TRUE) - pci_disable_msi(adapter->pdev); -#endif del_timer_sync(&adapter->tx_fifo_stall_timer); del_timer_sync(&adapter->watchdog_timer); del_timer_sync(&adapter->phy_info_timer); @@ -476,23 +544,17 @@ e1000_down(struct e1000_adapter *adapter) e1000_reset(adapter); e1000_clean_all_tx_rings(adapter); e1000_clean_all_rx_rings(adapter); +} - /* Power down the PHY so no link is implied when interface is down * - * The PHY cannot be powered down if any of the following is TRUE * - * (a) WoL is enabled - * (b) AMT is active - * (c) SoL/IDER session is active */ - if (!adapter->wol && adapter->hw.mac_type >= e1000_82540 && - adapter->hw.media_type == e1000_media_type_copper && - !(E1000_READ_REG(&adapter->hw, MANC) & E1000_MANC_SMBUS_EN) && - !mng_mode_enabled && - !e1000_check_phy_reset_block(&adapter->hw)) { - uint16_t mii_reg; - e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg); - mii_reg |= MII_CR_POWER_DOWN; - e1000_write_phy_reg(&adapter->hw, PHY_CTRL, mii_reg); - mdelay(1); - } +void +e1000_reinit_locked(struct e1000_adapter *adapter) +{ + WARN_ON(in_interrupt()); + while (test_and_set_bit(__E1000_RESETTING, &adapter->flags)) + msleep(1); + e1000_down(adapter); + e1000_up(adapter); + clear_bit(__E1000_RESETTING, &adapter->flags); } void @@ -518,6 +580,9 @@ e1000_reset(struct e1000_adapter *adapter) case e1000_82573: pba = E1000_PBA_12K; break; + case e1000_ich8lan: + pba = E1000_PBA_8K; + break; default: pba = E1000_PBA_48K; break; @@ -542,6 +607,12 @@ e1000_reset(struct e1000_adapter *adapter) /* Set the FC high water mark to 90% of the FIFO size. 
* Required to clear last 3 LSB */ fc_high_water_mark = ((pba * 9216)/10) & 0xFFF8; + /* We can't use 90% on small FIFOs because the remainder + * would be less than 1 full frame. In this case, we size + * it to allow at least a full frame above the high water + * mark. */ + if (pba < E1000_PBA_16K) + fc_high_water_mark = (pba * 1024) - 1600; adapter->hw.fc_high_water = fc_high_water_mark; adapter->hw.fc_low_water = fc_high_water_mark - 8; @@ -564,6 +635,23 @@ e1000_reset(struct e1000_adapter *adapter) e1000_reset_adaptive(&adapter->hw); e1000_phy_get_info(&adapter->hw, &adapter->phy_info); + + if (!adapter->smart_power_down && + (adapter->hw.mac_type == e1000_82571 || + adapter->hw.mac_type == e1000_82572)) { + uint16_t phy_data = 0; + /* speed up time to link by disabling smart power down, ignore + * the return value of this function because there is nothing + * different we would do if it failed */ + e1000_read_phy_reg(&adapter->hw, IGP02E1000_PHY_POWER_MGMT, + &phy_data); + phy_data &= ~IGP02E1000_PM_SPD; + e1000_write_phy_reg(&adapter->hw, IGP02E1000_PHY_POWER_MGMT, + phy_data); + } + + if (adapter->hw.mac_type < e1000_ich8lan) + /* FIXME: this code is duplicate and wrong for PCI Express */ if (adapter->en_mng_pt) { manc = E1000_READ_REG(&adapter->hw, MANC); manc |= (E1000_MANC_ARP_EN | E1000_MANC_EN_MNG2HOST); @@ -590,6 +678,7 @@ e1000_probe(struct pci_dev *pdev, struct net_device *netdev; struct e1000_adapter *adapter; unsigned long mmio_start, mmio_len; + unsigned long flash_start, flash_len; static int cards_found = 0; static int e1000_ksp3_port_a = 0; /* global ksp3 port a indication */ @@ -599,10 +688,12 @@ e1000_probe(struct pci_dev *pdev, if ((err = pci_enable_device(pdev))) return err; - if (!(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK))) { + if (!(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK)) && + !(err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))) { pci_using_dac = 1; } else { - if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK))) { + if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) && + (err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK))) { E1000_ERR("No usable DMA configuration, aborting\n"); return err; } @@ -682,6 +773,19 @@ e1000_probe(struct pci_dev *pdev, if ((err = e1000_sw_init(adapter))) goto err_sw_init; + /* Flash BAR mapping must happen after e1000_sw_init + * because it depends on mac_type */ + if ((adapter->hw.mac_type == e1000_ich8lan) && + (pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) { + flash_start = pci_resource_start(pdev, 1); + flash_len = pci_resource_len(pdev, 1); + adapter->hw.flash_address = ioremap(flash_start, flash_len); + if (!adapter->hw.flash_address) { + err = -EIO; + goto err_flashmap; + } + } + if ((err = e1000_check_phy_reset_block(&adapter->hw))) DPRINTK(PROBE, INFO, "PHY reset is blocked due to SOL/IDER session.\n"); @@ -700,6 +804,8 @@ e1000_probe(struct pci_dev *pdev, NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER; + if (adapter->hw.mac_type == e1000_ich8lan) + netdev->features &= ~NETIF_F_HW_VLAN_FILTER; } #ifdef NETIF_F_TSO @@ -715,11 +821,17 @@ e1000_probe(struct pci_dev *pdev, if (pci_using_dac) netdev->features |= NETIF_F_HIGHDMA; - /* hard_start_xmit is safe against parallel locking */ netdev->features |= NETIF_F_LLTX; adapter->en_mng_pt = e1000_enable_mng_pass_thru(&adapter->hw); + /* initialize eeprom parameters */ + + if (e1000_init_eeprom_params(&adapter->hw)) { + E1000_ERR("EEPROM initialization failed\n"); + return -EIO; + } + /* before reading the EEPROM, reset the controller to * put 
the device in a known good starting state */ @@ -758,9 +870,6 @@ e1000_probe(struct pci_dev *pdev, adapter->watchdog_timer.function = &e1000_watchdog; adapter->watchdog_timer.data = (unsigned long) adapter; - INIT_WORK(&adapter->watchdog_task, - (void (*)(void *))e1000_watchdog_task, adapter); - init_timer(&adapter->phy_info_timer); adapter->phy_info_timer.function = &e1000_update_phy_info; adapter->phy_info_timer.data = (unsigned long) adapter; @@ -790,6 +899,11 @@ e1000_probe(struct pci_dev *pdev, EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data); eeprom_apme_mask = E1000_EEPROM_82544_APM; break; + case e1000_ich8lan: + e1000_read_eeprom(&adapter->hw, + EEPROM_INIT_CONTROL1_REG, 1, &eeprom_data); + eeprom_apme_mask = E1000_EEPROM_ICH8_APME; + break; case e1000_82546: case e1000_82546_rev_3: case e1000_82571: @@ -849,6 +963,9 @@ e1000_probe(struct pci_dev *pdev, return 0; err_register: + if (adapter->hw.flash_address) + iounmap(adapter->hw.flash_address); +err_flashmap: err_sw_init: err_eeprom: iounmap(adapter->hw.hw_addr); @@ -882,6 +999,7 @@ e1000_remove(struct pci_dev *pdev) flush_scheduled_work(); if (adapter->hw.mac_type >= e1000_82540 && + adapter->hw.mac_type != e1000_ich8lan && adapter->hw.media_type == e1000_media_type_copper) { manc = E1000_READ_REG(&adapter->hw, MANC); if (manc & E1000_MANC_SMBUS_EN) { @@ -910,6 +1028,8 @@ e1000_remove(struct pci_dev *pdev) #endif iounmap(adapter->hw.hw_addr); + if (adapter->hw.flash_address) + iounmap(adapter->hw.flash_address); pci_release_regions(pdev); free_netdev(netdev); @@ -947,7 +1067,7 @@ e1000_sw_init(struct e1000_adapter *adapter) pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word); - adapter->rx_buffer_len = MAXIMUM_ETHERNET_FRAME_SIZE; + adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE; adapter->rx_ps_bsize0 = E1000_RXBUFFER_128; hw->max_frame_size = netdev->mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE; @@ -960,13 +1080,6 @@ e1000_sw_init(struct e1000_adapter *adapter) return -EIO; } - /* initialize eeprom parameters */ - - if (e1000_init_eeprom_params(hw)) { - E1000_ERR("EEPROM initialization failed\n"); - return -EIO; - } - switch (hw->mac_type) { default: break; @@ -1078,6 +1191,10 @@ e1000_open(struct net_device *netdev) struct e1000_adapter *adapter = netdev_priv(netdev); int err; + /* disallow open during test */ + if (test_bit(__E1000_DRIVER_TESTING, &adapter->flags)) + return -EBUSY; + /* allocate transmit descriptors */ if ((err = e1000_setup_all_tx_resources(adapter))) @@ -1088,6 +1205,12 @@ e1000_open(struct net_device *netdev) if ((err = e1000_setup_all_rx_resources(adapter))) goto err_setup_rx; + err = e1000_request_irq(adapter); + if (err) + goto err_up; + + e1000_power_up_phy(adapter); + if ((err = e1000_up(adapter))) goto err_up; adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; @@ -1131,7 +1254,10 @@ e1000_close(struct net_device *netdev) { struct e1000_adapter *adapter = netdev_priv(netdev); + WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags)); e1000_down(adapter); + e1000_power_down_phy(adapter); + e1000_free_irq(adapter); e1000_free_all_tx_resources(adapter); e1000_free_all_rx_resources(adapter); @@ -1189,8 +1315,7 @@ e1000_setup_tx_resources(struct e1000_adapter *adapter, int size; size = sizeof(struct e1000_buffer) * txdr->count; - - txdr->buffer_info = vmalloc_node(size, pcibus_to_node(pdev->bus)); + txdr->buffer_info = vmalloc(size); if (!txdr->buffer_info) { DPRINTK(PROBE, ERR, "Unable to allocate memory for the transmit descriptor ring\n"); @@ -1302,11 +1427,11 @@ e1000_configure_tx(struct e1000_adapter 
*adapter) tdba = adapter->tx_ring[0].dma; tdlen = adapter->tx_ring[0].count * sizeof(struct e1000_tx_desc); - E1000_WRITE_REG(hw, TDBAL, (tdba & 0x00000000ffffffffULL)); - E1000_WRITE_REG(hw, TDBAH, (tdba >> 32)); E1000_WRITE_REG(hw, TDLEN, tdlen); - E1000_WRITE_REG(hw, TDH, 0); + E1000_WRITE_REG(hw, TDBAH, (tdba >> 32)); + E1000_WRITE_REG(hw, TDBAL, (tdba & 0x00000000ffffffffULL)); E1000_WRITE_REG(hw, TDT, 0); + E1000_WRITE_REG(hw, TDH, 0); adapter->tx_ring[0].tdh = E1000_TDH; adapter->tx_ring[0].tdt = E1000_TDT; break; @@ -1373,8 +1498,6 @@ e1000_configure_tx(struct e1000_adapter *adapter) } else if (hw->mac_type == e1000_80003es2lan) { tarc = E1000_READ_REG(hw, TARC0); tarc |= 1; - if (hw->media_type == e1000_media_type_internal_serdes) - tarc |= (1 << 20); E1000_WRITE_REG(hw, TARC0, tarc); tarc = E1000_READ_REG(hw, TARC1); tarc |= 1; @@ -1418,7 +1541,7 @@ e1000_setup_rx_resources(struct e1000_adapter *adapter, int size, desc_len; size = sizeof(struct e1000_buffer) * rxdr->count; - rxdr->buffer_info = vmalloc_node(size, pcibus_to_node(pdev->bus)); + rxdr->buffer_info = vmalloc(size); if (!rxdr->buffer_info) { DPRINTK(PROBE, ERR, "Unable to allocate memory for the receive descriptor ring\n"); @@ -1560,9 +1683,6 @@ e1000_setup_rctl(struct e1000_adapter *adapter) E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF | (adapter->hw.mc_filter_type << E1000_RCTL_MO_SHIFT); - if (adapter->hw.mac_type > e1000_82543) - rctl |= E1000_RCTL_SECRC; - if (adapter->hw.tbi_compatibility_on == 1) rctl |= E1000_RCTL_SBP; else @@ -1628,7 +1748,7 @@ e1000_setup_rctl(struct e1000_adapter *adapter) rfctl |= E1000_RFCTL_IPV6_DIS; E1000_WRITE_REG(&adapter->hw, RFCTL, rfctl); - rctl |= E1000_RCTL_DTYP_PS | E1000_RCTL_SECRC; + rctl |= E1000_RCTL_DTYP_PS; psrctl |= adapter->rx_ps_bsize0 >> E1000_PSRCTL_BSIZE0_SHIFT; @@ -1712,11 +1832,11 @@ e1000_configure_rx(struct e1000_adapter *adapter) case 1: default: rdba = adapter->rx_ring[0].dma; - E1000_WRITE_REG(hw, RDBAL, (rdba & 0x00000000ffffffffULL)); - E1000_WRITE_REG(hw, RDBAH, (rdba >> 32)); E1000_WRITE_REG(hw, RDLEN, rdlen); - E1000_WRITE_REG(hw, RDH, 0); + E1000_WRITE_REG(hw, RDBAH, (rdba >> 32)); + E1000_WRITE_REG(hw, RDBAL, (rdba & 0x00000000ffffffffULL)); E1000_WRITE_REG(hw, RDT, 0); + E1000_WRITE_REG(hw, RDH, 0); adapter->rx_ring[0].rdh = E1000_RDH; adapter->rx_ring[0].rdt = E1000_RDT; break; @@ -1741,9 +1861,6 @@ e1000_configure_rx(struct e1000_adapter *adapter) E1000_WRITE_REG(hw, RXCSUM, rxcsum); } - if (hw->mac_type == e1000_82573) - E1000_WRITE_REG(hw, ERT, 0x0100); - /* Enable Receives */ E1000_WRITE_REG(hw, RCTL, rctl); } @@ -2083,6 +2200,12 @@ e1000_set_multi(struct net_device *netdev) uint32_t rctl; uint32_t hash_value; int i, rar_entries = E1000_RAR_ENTRIES; + int mta_reg_count = (hw->mac_type == e1000_ich8lan) ? 
+ E1000_NUM_MTA_REGISTERS_ICH8LAN : + E1000_NUM_MTA_REGISTERS; + + if (adapter->hw.mac_type == e1000_ich8lan) + rar_entries = E1000_RAR_ENTRIES_ICH8LAN; /* reserve RAR[14] for LAA over-write work-around */ if (adapter->hw.mac_type == e1000_82571) @@ -2121,14 +2244,18 @@ e1000_set_multi(struct net_device *netdev) mc_ptr = mc_ptr->next; } else { E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0); + E1000_WRITE_FLUSH(hw); E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0); + E1000_WRITE_FLUSH(hw); } } /* clear the old settings from the multicast hash table */ - for (i = 0; i < E1000_NUM_MTA_REGISTERS; i++) + for (i = 0; i < mta_reg_count; i++) { E1000_WRITE_REG_ARRAY(hw, MTA, i, 0); + E1000_WRITE_FLUSH(hw); + } /* load any remaining addresses into the hash table */ @@ -2201,19 +2328,19 @@ static void e1000_watchdog(unsigned long data) { struct e1000_adapter *adapter = (struct e1000_adapter *) data; - - /* Do the rest outside of interrupt context */ - schedule_work(&adapter->watchdog_task); -} - -static void -e1000_watchdog_task(struct e1000_adapter *adapter) -{ struct net_device *netdev = adapter->netdev; struct e1000_tx_ring *txdr = adapter->tx_ring; uint32_t link, tctl; - - e1000_check_for_link(&adapter->hw); + int32_t ret_val; + + ret_val = e1000_check_for_link(&adapter->hw); + if ((ret_val == E1000_ERR_PHY) && + (adapter->hw.phy_type == e1000_phy_igp_3) && + (E1000_READ_REG(&adapter->hw, CTRL) & E1000_PHY_CTRL_GBE_DISABLE)) { + /* See e1000_kumeran_lock_loss_workaround() */ + DPRINTK(LINK, INFO, + "Gigabit has been disabled, downgrading speed\n"); + } if (adapter->hw.mac_type == e1000_82573) { e1000_enable_tx_pkt_filtering(&adapter->hw); if (adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id) @@ -2394,7 +2521,7 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring, uint8_t ipcss, ipcso, tucss, tucso, hdr_len; int err; - if (skb_shinfo(skb)->gso_size) { + if (skb_is_gso(skb)) { if (skb_header_cloned(skb)) { err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); if (err) @@ -2415,7 +2542,7 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring, cmd_length = E1000_TXD_CMD_IP; ipcse = skb->h.raw - skb->data - 1; #ifdef NETIF_F_TSO_IPV6 - } else if (skb->protocol == ntohs(ETH_P_IPV6)) { + } else if (skb->protocol == htons(ETH_P_IPV6)) { skb->nh.ipv6h->payload_len = 0; skb->h.th->check = ~csum_ipv6_magic(&skb->nh.ipv6h->saddr, @@ -2519,7 +2646,7 @@ e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring, * tso gets written back prematurely before the data is fully * DMA'd to the controller */ if (!skb->data_len && tx_ring->last_tx_tso && - !skb_shinfo(skb)->gso_size) { + !skb_is_gso(skb)) { tx_ring->last_tx_tso = 0; size -= 4; } @@ -2779,9 +2906,10 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) case e1000_82571: case e1000_82572: case e1000_82573: + case e1000_ich8lan: pull_size = min((unsigned int)4, skb->data_len); if (!__pskb_pull_tail(skb, pull_size)) { - printk(KERN_ERR + DPRINTK(DRV, ERR, "__pskb_pull_tail failed.\n"); dev_kfree_skb_any(skb); return NETDEV_TX_OK; @@ -2806,8 +2934,7 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) #ifdef NETIF_F_TSO /* Controller Erratum workaround */ - if (!skb->data_len && tx_ring->last_tx_tso && - !skb_shinfo(skb)->gso_size) + if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb)) count++; #endif @@ -2919,8 +3046,7 @@ e1000_reset_task(struct net_device *netdev) { struct e1000_adapter *adapter = netdev_priv(netdev); - e1000_down(adapter); - e1000_up(adapter); + 
e1000_reinit_locked(adapter); } /** @@ -2964,6 +3090,7 @@ e1000_change_mtu(struct net_device *netdev, int new_mtu) /* Adapter-specific max frame size limits. */ switch (adapter->hw.mac_type) { case e1000_undefined ... e1000_82542_rev2_1: + case e1000_ich8lan: if (max_frame > MAXIMUM_ETHERNET_FRAME_SIZE) { DPRINTK(PROBE, ERR, "Jumbo Frames not supported.\n"); return -EINVAL; @@ -2997,7 +3124,7 @@ e1000_change_mtu(struct net_device *netdev, int new_mtu) break; } - /* NOTE: dev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN + /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN * means we reserve 2 more, this pushes us to allocate from the next * larger slab size * i.e. RXBUFFER_2048 --> size-4096 slab */ @@ -3018,7 +3145,6 @@ e1000_change_mtu(struct net_device *netdev, int new_mtu) adapter->rx_buffer_len = E1000_RXBUFFER_16384; /* adjust allocation if LPE protects us, and we aren't using SBP */ -#define MAXIMUM_ETHERNET_VLAN_SIZE 1522 if (!adapter->hw.tbi_compatibility_on && ((max_frame == MAXIMUM_ETHERNET_FRAME_SIZE) || (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE))) @@ -3026,10 +3152,8 @@ e1000_change_mtu(struct net_device *netdev, int new_mtu) netdev->mtu = new_mtu; - if (netif_running(netdev)) { - e1000_down(adapter); - e1000_up(adapter); - } + if (netif_running(netdev)) + e1000_reinit_locked(adapter); adapter->hw.max_frame_size = max_frame; @@ -3074,12 +3198,15 @@ e1000_update_stats(struct e1000_adapter *adapter) adapter->stats.bprc += E1000_READ_REG(hw, BPRC); adapter->stats.mprc += E1000_READ_REG(hw, MPRC); adapter->stats.roc += E1000_READ_REG(hw, ROC); + + if (adapter->hw.mac_type != e1000_ich8lan) { adapter->stats.prc64 += E1000_READ_REG(hw, PRC64); adapter->stats.prc127 += E1000_READ_REG(hw, PRC127); adapter->stats.prc255 += E1000_READ_REG(hw, PRC255); adapter->stats.prc511 += E1000_READ_REG(hw, PRC511); adapter->stats.prc1023 += E1000_READ_REG(hw, PRC1023); adapter->stats.prc1522 += E1000_READ_REG(hw, PRC1522); + } adapter->stats.symerrs += E1000_READ_REG(hw, SYMERRS); adapter->stats.mpc += E1000_READ_REG(hw, MPC); @@ -3107,12 +3234,16 @@ e1000_update_stats(struct e1000_adapter *adapter) adapter->stats.totl += E1000_READ_REG(hw, TOTL); adapter->stats.toth += E1000_READ_REG(hw, TOTH); adapter->stats.tpr += E1000_READ_REG(hw, TPR); + + if (adapter->hw.mac_type != e1000_ich8lan) { adapter->stats.ptc64 += E1000_READ_REG(hw, PTC64); adapter->stats.ptc127 += E1000_READ_REG(hw, PTC127); adapter->stats.ptc255 += E1000_READ_REG(hw, PTC255); adapter->stats.ptc511 += E1000_READ_REG(hw, PTC511); adapter->stats.ptc1023 += E1000_READ_REG(hw, PTC1023); adapter->stats.ptc1522 += E1000_READ_REG(hw, PTC1522); + } + adapter->stats.mptc += E1000_READ_REG(hw, MPTC); adapter->stats.bptc += E1000_READ_REG(hw, BPTC); @@ -3134,6 +3265,8 @@ e1000_update_stats(struct e1000_adapter *adapter) if (hw->mac_type > e1000_82547_rev_2) { adapter->stats.iac += E1000_READ_REG(hw, IAC); adapter->stats.icrxoc += E1000_READ_REG(hw, ICRXOC); + + if (adapter->hw.mac_type != e1000_ich8lan) { adapter->stats.icrxptc += E1000_READ_REG(hw, ICRXPTC); adapter->stats.icrxatc += E1000_READ_REG(hw, ICRXATC); adapter->stats.ictxptc += E1000_READ_REG(hw, ICTXPTC); @@ -3141,6 +3274,7 @@ e1000_update_stats(struct e1000_adapter *adapter) adapter->stats.ictxqec += E1000_READ_REG(hw, ICTXQEC); adapter->stats.ictxqmtc += E1000_READ_REG(hw, ICTXQMTC); adapter->stats.icrxdmtc += E1000_READ_REG(hw, ICRXDMTC); + } } /* Fill out the OS statistics structure */ @@ -3249,8 +3383,8 @@ e1000_intr(int irq, void *data, struct 
pt_regs *regs) E1000_WRITE_REG(hw, IMC, ~0); E1000_WRITE_FLUSH(hw); } - if (likely(netif_rx_schedule_prep(&adapter->polling_netdev[0]))) - __netif_rx_schedule(&adapter->polling_netdev[0]); + if (likely(netif_rx_schedule_prep(netdev))) + __netif_rx_schedule(netdev); else e1000_irq_enable(adapter); #else @@ -3293,34 +3427,26 @@ e1000_clean(struct net_device *poll_dev, int *budget) { struct e1000_adapter *adapter; int work_to_do = min(*budget, poll_dev->quota); - int tx_cleaned = 0, i = 0, work_done = 0; + int tx_cleaned = 0, work_done = 0; /* Must NOT use netdev_priv macro here. */ adapter = poll_dev->priv; /* Keep link state information with original netdev */ - if (!netif_carrier_ok(adapter->netdev)) + if (!netif_carrier_ok(poll_dev)) goto quit_polling; - while (poll_dev != &adapter->polling_netdev[i]) { - i++; - BUG_ON(i == adapter->num_rx_queues); + /* e1000_clean is called per-cpu. This lock protects + * tx_ring[0] from being cleaned by multiple cpus + * simultaneously. A failure obtaining the lock means + * tx_ring[0] is currently being cleaned anyway. */ + if (spin_trylock(&adapter->tx_queue_lock)) { + tx_cleaned = e1000_clean_tx_irq(adapter, + &adapter->tx_ring[0]); + spin_unlock(&adapter->tx_queue_lock); } - if (likely(adapter->num_tx_queues == 1)) { - /* e1000_clean is called per-cpu. This lock protects - * tx_ring[0] from being cleaned by multiple cpus - * simultaneously. A failure obtaining the lock means - * tx_ring[0] is currently being cleaned anyway. */ - if (spin_trylock(&adapter->tx_queue_lock)) { - tx_cleaned = e1000_clean_tx_irq(adapter, - &adapter->tx_ring[0]); - spin_unlock(&adapter->tx_queue_lock); - } - } else - tx_cleaned = e1000_clean_tx_irq(adapter, &adapter->tx_ring[i]); - - adapter->clean_rx(adapter, &adapter->rx_ring[i], + adapter->clean_rx(adapter, &adapter->rx_ring[0], &work_done, work_to_do); *budget -= work_done; @@ -3328,7 +3454,7 @@ e1000_clean(struct net_device *poll_dev, int *budget) /* If no Tx and not enough Rx work done, exit the polling mode */ if ((!tx_cleaned && (work_done == 0)) || - !netif_running(adapter->netdev)) { + !netif_running(poll_dev)) { quit_polling: netif_rx_complete(poll_dev); e1000_irq_enable(adapter); @@ -3543,11 +3669,15 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter, length = le16_to_cpu(rx_desc->length); + /* adjust length to remove Ethernet CRC */ + length -= 4; + if (unlikely(!(status & E1000_RXD_STAT_EOP))) { /* All receives must fit into a single buffer */ E1000_DBG("%s: Receive packet consumed multiple" " buffers\n", netdev->name); - dev_kfree_skb_irq(skb); + /* recycle */ + buffer_info-> skb = skb; goto next_desc; } @@ -3575,7 +3705,7 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter, #define E1000_CB_LENGTH 256 if (length < E1000_CB_LENGTH) { struct sk_buff *new_skb = - dev_alloc_skb(length + NET_IP_ALIGN); + netdev_alloc_skb(netdev, length + NET_IP_ALIGN); if (new_skb) { skb_reserve(new_skb, NET_IP_ALIGN); new_skb->dev = netdev; @@ -3675,7 +3805,6 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, buffer_info = &rx_ring->buffer_info[i]; while (staterr & E1000_RXD_STAT_DD) { - buffer_info = &rx_ring->buffer_info[i]; ps_page = &rx_ring->ps_page[i]; ps_page_dma = &rx_ring->ps_page_dma[i]; #ifdef CONFIG_E1000_NAPI @@ -3747,8 +3876,9 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, pci_dma_sync_single_for_device(pdev, ps_page_dma->ps_page_dma[0], PAGE_SIZE, PCI_DMA_FROMDEVICE); + /* remove the CRC */ + l1 -= 4; skb_put(skb, l1); - length += l1; goto copydone; } /* if */ } @@ -3767,6 +3897,10 @@ 
e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, skb->truesize += length; } + /* strip the ethernet crc, problem is we're using pages now so + * this whole operation can get a little cpu intensive */ + pskb_trim(skb, skb->len - 4); + copydone: e1000_rx_checksum(adapter, staterr, le16_to_cpu(rx_desc->wb.lower.hi_dword.csum_ip.csum), skb); @@ -3842,7 +3976,7 @@ e1000_alloc_rx_buffers(struct e1000_adapter *adapter, while (cleaned_count--) { if (!(skb = buffer_info->skb)) - skb = dev_alloc_skb(bufsz); + skb = netdev_alloc_skb(netdev, bufsz); else { skb_trim(skb, 0); goto map_skb; @@ -3860,7 +3994,7 @@ e1000_alloc_rx_buffers(struct e1000_adapter *adapter, DPRINTK(RX_ERR, ERR, "skb align check failed: %u bytes " "at %p\n", bufsz, skb->data); /* Try again, without freeing the previous */ - skb = dev_alloc_skb(bufsz); + skb = netdev_alloc_skb(netdev, bufsz); /* Failed allocation, critical failure */ if (!skb) { dev_kfree_skb(oldskb); @@ -3984,7 +4118,8 @@ e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter, rx_desc->read.buffer_addr[j+1] = ~0; } - skb = dev_alloc_skb(adapter->rx_ps_bsize0 + NET_IP_ALIGN); + skb = netdev_alloc_skb(netdev, + adapter->rx_ps_bsize0 + NET_IP_ALIGN); if (unlikely(!skb)) { adapter->alloc_rx_buff_failed++; @@ -4180,10 +4315,9 @@ e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) return retval; } } - if (netif_running(adapter->netdev)) { - e1000_down(adapter); - e1000_up(adapter); - } else + if (netif_running(adapter->netdev)) + e1000_reinit_locked(adapter); + else e1000_reset(adapter); break; case M88E1000_PHY_SPEC_CTRL: @@ -4200,10 +4334,9 @@ e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) case PHY_CTRL: if (mii_reg & MII_CR_POWER_DOWN) break; - if (netif_running(adapter->netdev)) { - e1000_down(adapter); - e1000_up(adapter); - } else + if (netif_running(adapter->netdev)) + e1000_reinit_locked(adapter); + else e1000_reset(adapter); break; } @@ -4250,11 +4383,13 @@ e1000_write_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t *value) pci_write_config_word(adapter->pdev, reg, *value); } +#if 0 uint32_t e1000_io_read(struct e1000_hw *hw, unsigned long port) { return inl(port); } +#endif /* 0 */ void e1000_io_write(struct e1000_hw *hw, unsigned long port, uint32_t value) @@ -4277,18 +4412,21 @@ e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp) ctrl |= E1000_CTRL_VME; E1000_WRITE_REG(&adapter->hw, CTRL, ctrl); + if (adapter->hw.mac_type != e1000_ich8lan) { /* enable VLAN receive filtering */ rctl = E1000_READ_REG(&adapter->hw, RCTL); rctl |= E1000_RCTL_VFE; rctl &= ~E1000_RCTL_CFIEN; E1000_WRITE_REG(&adapter->hw, RCTL, rctl); e1000_update_mng_vlan(adapter); + } } else { /* disable VLAN tag insert/strip */ ctrl = E1000_READ_REG(&adapter->hw, CTRL); ctrl &= ~E1000_CTRL_VME; E1000_WRITE_REG(&adapter->hw, CTRL, ctrl); + if (adapter->hw.mac_type != e1000_ich8lan) { /* disable VLAN filtering */ rctl = E1000_READ_REG(&adapter->hw, RCTL); rctl &= ~E1000_RCTL_VFE; @@ -4297,6 +4435,7 @@ e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp) e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id); adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; } + } } e1000_irq_enable(adapter); @@ -4458,12 +4597,16 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state) struct e1000_adapter *adapter = netdev_priv(netdev); uint32_t ctrl, ctrl_ext, rctl, manc, status; uint32_t wufc = adapter->wol; +#ifdef CONFIG_PM int retval = 0; +#endif netif_device_detach(netdev); - if (netif_running(netdev)) + if 
(netif_running(netdev)) { + WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags)); e1000_down(adapter); + } #ifdef CONFIG_PM /* Implement our own version of pci_save_state(pdev) because pci- @@ -4521,7 +4664,9 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state) pci_enable_wake(pdev, PCI_D3cold, 0); } + /* FIXME: this code is incorrect for PCI Express */ if (adapter->hw.mac_type >= e1000_82540 && + adapter->hw.mac_type != e1000_ich8lan && adapter->hw.media_type == e1000_media_type_copper) { manc = E1000_READ_REG(&adapter->hw, MANC); if (manc & E1000_MANC_SMBUS_EN) { @@ -4532,6 +4677,9 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state) } } + if (adapter->hw.phy_type == e1000_phy_igp_3) + e1000_phy_powerdown_workaround(&adapter->hw); + /* Release control of h/w to f/w. If f/w is AMT enabled, this * would have already happened in close and is redundant. */ e1000_release_hw_control(adapter); @@ -4567,7 +4715,9 @@ e1000_resume(struct pci_dev *pdev) netif_device_attach(netdev); + /* FIXME: this code is incorrect for PCI Express */ if (adapter->hw.mac_type >= e1000_82540 && + adapter->hw.mac_type != e1000_ich8lan && adapter->hw.media_type == e1000_media_type_copper) { manc = E1000_READ_REG(&adapter->hw, MANC); manc &= ~(E1000_MANC_ARP_EN); @@ -4601,6 +4751,7 @@ static void e1000_netpoll(struct net_device *netdev) { struct e1000_adapter *adapter = netdev_priv(netdev); + disable_irq(adapter->pdev->irq); e1000_intr(adapter->pdev->irq, netdev, NULL); e1000_clean_tx_irq(adapter, adapter->tx_ring);
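
Beyond the new device IDs and the ICH8LAN plumbing, the patch above pulls interrupt setup out of e1000_up()/e1000_down() into dedicated e1000_request_irq()/e1000_free_irq() helpers: MSI is attempted first on MACs newer than 82547 rev 2, and the driver falls back to a shared legacy interrupt if pci_enable_msi() fails. A minimal sketch of that try-MSI-then-fall-back pattern, using placeholder names (my_request_irq, my_isr, my_priv) rather than symbols from this driver, might look like:

#include <linux/pci.h>
#include <linux/interrupt.h>

/*
 * Illustrative sketch only: my_isr, my_priv and *have_msi are
 * placeholders, not symbols from e1000_main.c.  Try MSI first on
 * hardware that supports it, then fall back to a shared legacy
 * interrupt line.
 */
static int my_request_irq(struct pci_dev *pdev, void *my_priv,
			  irqreturn_t (*my_isr)(int, void *, struct pt_regs *),
			  int *have_msi)
{
	unsigned long irq_flags = IRQF_SHARED;
	int err;

	*have_msi = 0;
#ifdef CONFIG_PCI_MSI
	if (!pci_enable_msi(pdev)) {
		*have_msi = 1;
		irq_flags &= ~IRQF_SHARED;	/* an MSI vector is exclusive */
	}
#endif
	err = request_irq(pdev->irq, my_isr, irq_flags, "my_dev", my_priv);
	if (err && *have_msi) {
		/* undo MSI so a later retry can use legacy INTx cleanly */
		pci_disable_msi(pdev);
		*have_msi = 0;
	}
	return err;
}

Because an MSI vector is never shared with another device, the sketch drops IRQF_SHARED once pci_enable_msi() succeeds, which is the same reason the patch clears that flag in e1000_request_irq().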
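
Later hunks replace every open-coded e1000_down()/e1000_up() pair (reset task, MTU change, MII ioctl) with e1000_reinit_locked(), which serializes concurrent resets behind the new __E1000_RESETTING flag bit. The core of that pattern, again sketched with placeholder names instead of the driver's adapter->flags:

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/hardirq.h>
#include <linux/kernel.h>

/*
 * Sketch of the serialization added by e1000_reinit_locked(): whoever
 * wins the test_and_set_bit() race performs the down/up cycle, and
 * every other caller sleeps until the flag is released.  MY_RESETTING
 * and my_flags stand in for __E1000_RESETTING and adapter->flags; the
 * actual down/up calls are left as a comment.
 */
#define MY_RESETTING 0
static unsigned long my_flags;

static void my_reinit_locked(void)
{
	WARN_ON(in_interrupt());		/* msleep() may sleep */

	while (test_and_set_bit(MY_RESETTING, &my_flags))
		msleep(1);

	/* ... tear the interface down and bring it back up here ... */

	clear_bit(MY_RESETTING, &my_flags);
}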