igb: cleanup tx dma so map & unmap use matching calls
[pandora-kernel.git] / drivers / net / igb / igb_main.c
index f0c3a01..ca84216 100644 (file)
@@ -62,8 +62,10 @@ static const struct e1000_info *igb_info_tbl[] = {
 
 static struct pci_device_id igb_pci_tbl[] = {
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 },
+       { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 },
+       { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER), board_82575 },
@@ -275,6 +277,17 @@ static char *igb_get_time_str(struct igb_adapter *adapter,
 }
 #endif
 
+/**
+ * igb_desc_unused - calculate if we have unused descriptors
+ **/
+static int igb_desc_unused(struct igb_ring *ring)
+{
+       if (ring->next_to_clean > ring->next_to_use)
+               return ring->next_to_clean - ring->next_to_use - 1;
+
+       return ring->count + ring->next_to_clean - ring->next_to_use - 1;
+}
+
 /**
  * igb_init_module - Driver Registration Routine
  *
@@ -871,12 +884,12 @@ static void igb_configure(struct igb_adapter *adapter)
 
        igb_rx_fifo_flush_82575(&adapter->hw);
 
-       /* call IGB_DESC_UNUSED which always leaves
+       /* call igb_desc_unused which always leaves
         * at least 1 descriptor unused to make sure
         * next_to_use != next_to_clean */
        for (i = 0; i < adapter->num_rx_queues; i++) {
                struct igb_ring *ring = &adapter->rx_ring[i];
-               igb_alloc_rx_buffers_adv(ring, IGB_DESC_UNUSED(ring));
+               igb_alloc_rx_buffers_adv(ring, igb_desc_unused(ring));
        }
 
 
@@ -1126,11 +1139,10 @@ static int __devinit igb_probe(struct pci_dev *pdev,
        struct net_device *netdev;
        struct igb_adapter *adapter;
        struct e1000_hw *hw;
-       struct pci_dev *us_dev;
        const struct e1000_info *ei = igb_info_tbl[ent->driver_data];
        unsigned long mmio_start, mmio_len;
-       int err, pci_using_dac, pos;
-       u16 eeprom_data = 0, state = 0;
+       int err, pci_using_dac;
+       u16 eeprom_data = 0;
        u16 eeprom_apme_mask = IGB_EEPROM_APME;
        u32 part_num;
 
@@ -1156,27 +1168,6 @@ static int __devinit igb_probe(struct pci_dev *pdev,
                }
        }
 
-       /* 82575 requires that the pci-e link partner disable the L0s state */
-       switch (pdev->device) {
-       case E1000_DEV_ID_82575EB_COPPER:
-       case E1000_DEV_ID_82575EB_FIBER_SERDES:
-       case E1000_DEV_ID_82575GB_QUAD_COPPER:
-               us_dev = pdev->bus->self;
-               pos = pci_find_capability(us_dev, PCI_CAP_ID_EXP);
-               if (pos) {
-                       pci_read_config_word(us_dev, pos + PCI_EXP_LNKCTL,
-                                            &state);
-                       state &= ~PCIE_LINK_STATE_L0S;
-                       pci_write_config_word(us_dev, pos + PCI_EXP_LNKCTL,
-                                             state);
-                       dev_info(&pdev->dev,
-                                "Disabling ASPM L0s upstream switch port %s\n",
-                                pci_name(us_dev));
-               }
-       default:
-               break;
-       }
-
        err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
                                           IORESOURCE_MEM),
                                           igb_driver_name);
@@ -1321,13 +1312,10 @@ static int __devinit igb_probe(struct pci_dev *pdev,
                goto err_eeprom;
        }
 
-       init_timer(&adapter->watchdog_timer);
-       adapter->watchdog_timer.function = &igb_watchdog;
-       adapter->watchdog_timer.data = (unsigned long) adapter;
-
-       init_timer(&adapter->phy_info_timer);
-       adapter->phy_info_timer.function = &igb_update_phy_info;
-       adapter->phy_info_timer.data = (unsigned long) adapter;
+       setup_timer(&adapter->watchdog_timer, &igb_watchdog,
+                   (unsigned long) adapter);
+       setup_timer(&adapter->phy_info_timer, &igb_update_phy_info,
+                   (unsigned long) adapter);
 
        INIT_WORK(&adapter->reset_task, igb_reset_task);
        INIT_WORK(&adapter->watchdog_task, igb_watchdog_task);
@@ -1374,6 +1362,16 @@ static int __devinit igb_probe(struct pci_dev *pdev,
                if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1)
                        adapter->eeprom_wol = 0;
                break;
+       case E1000_DEV_ID_82576_QUAD_COPPER:
+               /* if quad port adapter, disable WoL on all but port A */
+               if (global_quad_port_a != 0)
+                       adapter->eeprom_wol = 0;
+               else
+                       adapter->flags |= IGB_FLAG_QUAD_PORT_A;
+               /* Reset for multiple quad port adapters */
+               if (++global_quad_port_a == 4)
+                       global_quad_port_a = 0;
+               break;
        }
 
        /* initialize the wol settings based on the eeprom settings */
@@ -2259,19 +2257,14 @@ static void igb_free_all_tx_resources(struct igb_adapter *adapter)
 static void igb_unmap_and_free_tx_resource(struct igb_adapter *adapter,
                                           struct igb_buffer *buffer_info)
 {
-       if (buffer_info->dma) {
-               pci_unmap_page(adapter->pdev,
-                               buffer_info->dma,
-                               buffer_info->length,
-                               PCI_DMA_TODEVICE);
-               buffer_info->dma = 0;
-       }
+       buffer_info->dma = 0;
        if (buffer_info->skb) {
+               skb_dma_unmap(&adapter->pdev->dev, buffer_info->skb,
+                             DMA_TO_DEVICE);
                dev_kfree_skb_any(buffer_info->skb);
                buffer_info->skb = NULL;
        }
        buffer_info->time_stamp = 0;
-       buffer_info->next_to_watch = 0;
        /* buffer_info must be completely set up in the transmit path */
 }
 
@@ -2466,7 +2459,7 @@ static void igb_set_multi(struct net_device *netdev)
        struct e1000_hw *hw = &adapter->hw;
        struct e1000_mac_info *mac = &hw->mac;
        struct dev_mc_list *mc_ptr;
-       u8  *mta_list;
+       u8  *mta_list = NULL;
        u32 rctl;
        int i;
 
@@ -2487,17 +2480,15 @@ static void igb_set_multi(struct net_device *netdev)
        }
        wr32(E1000_RCTL, rctl);
 
-       if (!netdev->mc_count) {
-               /* nothing to program, so clear mc list */
-               igb_update_mc_addr_list(hw, NULL, 0, 1,
-                                       mac->rar_entry_count);
-               return;
+       if (netdev->mc_count) {
+               mta_list = kzalloc(netdev->mc_count * 6, GFP_ATOMIC);
+               if (!mta_list) {
+                       dev_err(&adapter->pdev->dev,
+                               "failed to allocate multicast filter list\n");
+                       return;
+               }
        }
 
-       mta_list = kzalloc(netdev->mc_count * 6, GFP_ATOMIC);
-       if (!mta_list)
-               return;
-
        /* The shared function expects a packed array of only addresses. */
        mc_ptr = netdev->mc_list;
 
@@ -2671,7 +2662,7 @@ link_up:
        igb_update_adaptive(&adapter->hw);
 
        if (!netif_carrier_ok(netdev)) {
-               if (IGB_DESC_UNUSED(tx_ring) + 1 < tx_ring->count) {
+               if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) {
                        /* We've lost link, so the controller stops DMA,
                         * but we've got queued Tx work that's never going
                         * to get done, so reset controller to flush Tx.
@@ -3018,7 +3009,18 @@ static inline bool igb_tx_csum_adv(struct igb_adapter *adapter,
                tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);
 
                if (skb->ip_summed == CHECKSUM_PARTIAL) {
-                       switch (skb->protocol) {
+                       __be16 protocol;
+
+                       if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) {
+                               const struct vlan_ethhdr *vhdr =
+                                         (const struct vlan_ethhdr*)skb->data;
+
+                               protocol = vhdr->h_vlan_encapsulated_proto;
+                       } else {
+                               protocol = skb->protocol;
+                       }
+
+                       switch (protocol) {
                        case cpu_to_be16(ETH_P_IP):
                                tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
                                if (ip_hdr(skb)->protocol == IPPROTO_TCP)
@@ -3071,25 +3073,33 @@ static inline int igb_tx_map_adv(struct igb_adapter *adapter,
        unsigned int len = skb_headlen(skb);
        unsigned int count = 0, i;
        unsigned int f;
+       dma_addr_t *map;
 
        i = tx_ring->next_to_use;
 
+       if (skb_dma_map(&adapter->pdev->dev, skb, DMA_TO_DEVICE)) {
+               dev_err(&adapter->pdev->dev, "TX DMA map failed\n");
+               return 0;
+       }
+
+       map = skb_shinfo(skb)->dma_maps;
+
        buffer_info = &tx_ring->buffer_info[i];
        BUG_ON(len >= IGB_MAX_DATA_PER_TXD);
        buffer_info->length = len;
        /* set time_stamp *before* dma to help avoid a possible race */
        buffer_info->time_stamp = jiffies;
        buffer_info->next_to_watch = i;
-       buffer_info->dma = pci_map_single(adapter->pdev, skb->data, len,
-                                         PCI_DMA_TODEVICE);
+       buffer_info->dma = map[count];
        count++;
-       i++;
-       if (i == tx_ring->count)
-               i = 0;
 
        for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
                struct skb_frag_struct *frag;
 
+               i++;
+               if (i == tx_ring->count)
+                       i = 0;
+
                frag = &skb_shinfo(skb)->frags[f];
                len = frag->size;
 
@@ -3098,19 +3108,10 @@ static inline int igb_tx_map_adv(struct igb_adapter *adapter,
                buffer_info->length = len;
                buffer_info->time_stamp = jiffies;
                buffer_info->next_to_watch = i;
-               buffer_info->dma = pci_map_page(adapter->pdev,
-                                               frag->page,
-                                               frag->page_offset,
-                                               len,
-                                               PCI_DMA_TODEVICE);
-
+               buffer_info->dma = map[count];
                count++;
-               i++;
-               if (i == tx_ring->count)
-                       i = 0;
        }
 
-       i = ((i == 0) ? tx_ring->count - 1 : i - 1);
        tx_ring->buffer_info[i].skb = skb;
        tx_ring->buffer_info[first].next_to_watch = i;
 
@@ -3198,7 +3199,7 @@ static int __igb_maybe_stop_tx(struct net_device *netdev,
 
        /* We need to check again in a case another CPU has just
         * made room available. */
-       if (IGB_DESC_UNUSED(tx_ring) < size)
+       if (igb_desc_unused(tx_ring) < size)
                return -EBUSY;
 
        /* A reprieve! */
@@ -3210,7 +3211,7 @@ static int __igb_maybe_stop_tx(struct net_device *netdev,
 static int igb_maybe_stop_tx(struct net_device *netdev,
                             struct igb_ring *tx_ring, int size)
 {
-       if (IGB_DESC_UNUSED(tx_ring) >= size)
+       if (igb_desc_unused(tx_ring) >= size)
                return 0;
        return __igb_maybe_stop_tx(netdev, tx_ring, size);
 }
@@ -3223,6 +3224,7 @@ static int igb_xmit_frame_ring_adv(struct sk_buff *skb,
        unsigned int first;
        unsigned int tx_flags = 0;
        u8 hdr_len = 0;
+       int count = 0;
        int tso = 0;
        union skb_shared_tx *shtx;
 
@@ -3284,14 +3286,23 @@ static int igb_xmit_frame_ring_adv(struct sk_buff *skb,
                 (skb->ip_summed == CHECKSUM_PARTIAL))
                tx_flags |= IGB_TX_FLAGS_CSUM;
 
-       igb_tx_queue_adv(adapter, tx_ring, tx_flags,
-                        igb_tx_map_adv(adapter, tx_ring, skb, first),
-                        skb->len, hdr_len);
-
-       netdev->trans_start = jiffies;
-
-       /* Make sure there is space in the ring for the next send. */
-       igb_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 4);
+       /*
+        * count reflects descriptors mapped, if 0 then mapping error
+        * has occurred and we need to rewind the descriptor queue
+        */
+       count = igb_tx_map_adv(adapter, tx_ring, skb, first);
+
+       if (count) {
+               igb_tx_queue_adv(adapter, tx_ring, tx_flags, count,
+                                skb->len, hdr_len);
+               netdev->trans_start = jiffies;
+               /* Make sure there is space in the ring for the next send. */
+               igb_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 4);
+       } else {
+               dev_kfree_skb_any(skb);
+               tx_ring->buffer_info[first].time_stamp = 0;
+               tx_ring->next_to_use = first;
+       }
 
        return NETDEV_TX_OK;
 }
@@ -3828,7 +3839,7 @@ static void igb_restore_vf_multicasts(struct igb_adapter *adapter)
 
        for (i = 0; i < adapter->vfs_allocated_count; i++) {
                vf_data = &adapter->vf_data[i];
-               for (j = 0; j < vf_data[i].num_vf_mc_hashes; j++)
+               for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
                        igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
        }
 }
@@ -3895,10 +3906,15 @@ static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
 
                        /* if !enabled we need to set this up in vfta */
                        if (!(reg & E1000_VLVF_VLANID_ENABLE)) {
-                               /* add VID to filter table */
-                               igb_vfta_set(hw, vid, true);
+                               /* add VID to filter table, if bit already set
+                                * PF must have added it outside of table */
+                               if (igb_vfta_set(hw, vid, true))
+                                       reg |= 1 << (E1000_VLVF_POOLSEL_SHIFT +
+                                               adapter->vfs_allocated_count);
                                reg |= E1000_VLVF_VLANID_ENABLE;
                        }
+                       reg &= ~E1000_VLVF_VLANID_MASK;
+                       reg |= vid;
 
                        wr32(E1000_VLVF(i), reg);
                        return 0;
@@ -4304,7 +4320,7 @@ static bool igb_clean_tx_irq(struct igb_ring *tx_ring)
 
        if (unlikely(count &&
                     netif_carrier_ok(netdev) &&
-                    IGB_DESC_UNUSED(tx_ring) >= IGB_TX_QUEUE_WAKE)) {
+                    igb_desc_unused(tx_ring) >= IGB_TX_QUEUE_WAKE)) {
                /* Make sure that anybody stopping the queue after this
                 * sees the new next_to_clean.
                 */
@@ -4581,7 +4597,7 @@ next_desc:
        }
 
        rx_ring->next_to_clean = i;
-       cleaned_count = IGB_DESC_UNUSED(rx_ring);
+       cleaned_count = igb_desc_unused(rx_ring);
 
        if (cleaned_count)
                igb_alloc_rx_buffers_adv(rx_ring, cleaned_count);