Merge git://github.com/Jkirsher/net-next
[pandora-kernel.git] / drivers / net / ethernet / intel / ixgbe / ixgbe_main.c
index b73194c..bb069bc 100644 (file)
@@ -79,59 +79,32 @@ static const struct ixgbe_info *ixgbe_info_tbl[] = {
  *   Class, Class Mask, private data (not used) }
  */
 static DEFINE_PCI_DEVICE_TABLE(ixgbe_pci_tbl) = {
-       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598),
-        board_82598 },
-       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT),
-        board_82598 },
-       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT),
-        board_82598 },
-       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT),
-        board_82598 },
-       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT2),
-        board_82598 },
-       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_CX4),
-        board_82598 },
-       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT),
-        board_82598 },
-       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_DA_DUAL_PORT),
-        board_82598 },
-       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM),
-        board_82598 },
-       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_XF_LR),
-        board_82598 },
-       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_SFP_LOM),
-        board_82598 },
-       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_BX),
-        board_82598 },
-       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4),
-        board_82599 },
-       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_XAUI_LOM),
-        board_82599 },
-       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KR),
-        board_82599 },
-       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP),
-        board_82599 },
-       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_EM),
-        board_82599 },
-       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4_MEZZ),
-        board_82599 },
-       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4),
-        board_82599 },
-       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_BACKPLANE_FCOE),
-        board_82599 },
-       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_FCOE),
-        board_82599 },
-       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_T3_LOM),
-        board_82599 },
-       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE),
-        board_82599 },
-       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T),
-        board_X540 },
-       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF2),
-        board_82599 },
-       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_LS),
-        board_82599 },
-
+       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598), board_82598 },
+       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT), board_82598 },
+       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT), board_82598 },
+       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT), board_82598 },
+       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT2), board_82598 },
+       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_CX4), board_82598 },
+       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT), board_82598 },
+       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_DA_DUAL_PORT), board_82598 },
+       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM), board_82598 },
+       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_XF_LR), board_82598 },
+       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_SFP_LOM), board_82598 },
+       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_BX), board_82598 },
+       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4), board_82599 },
+       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_XAUI_LOM), board_82599 },
+       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KR), board_82599 },
+       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP), board_82599 },
+       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_EM), board_82599 },
+       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4_MEZZ), board_82599 },
+       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4), board_82599 },
+       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_BACKPLANE_FCOE), board_82599 },
+       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_FCOE), board_82599 },
+       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_T3_LOM), board_82599 },
+       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE), board_82599 },
+       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T), board_X540 },
+       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF2), board_82599 },
+       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_LS), board_82599 },
        /* required last entry */
        {0, }
 };
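
The table above is zero-terminated and is what the PCI core matches against at probe time. A minimal sketch of how such an ID table is typically wired into a pci_driver (the example_* names are hypothetical; only MODULE_DEVICE_TABLE() and the standard struct pci_driver fields are assumed):

#include <linux/module.h>
#include <linux/pci.h>

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id);
static void example_remove(struct pci_dev *pdev);

static struct pci_driver example_driver = {
	.name     = "ixgbe_example",	/* name shown for bound devices */
	.id_table = ixgbe_pci_tbl,	/* terminated by the {0, } sentinel above */
	.probe    = example_probe,	/* called once per matching device */
	.remove   = example_remove,
};

/* export the IDs so modalias-based module autoloading can find the driver */
MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl);
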
@@ -385,7 +358,7 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter)
                tx_ring = adapter->tx_ring[n];
                tx_buffer_info =
                        &tx_ring->tx_buffer_info[tx_ring->next_to_clean];
-               pr_info(" %5d %5X %5X %016llX %04X %3X %016llX\n",
+               pr_info(" %5d %5X %5X %016llX %04X %p %016llX\n",
                           n, tx_ring->next_to_use, tx_ring->next_to_clean,
                           (u64)tx_buffer_info->dma,
                           tx_buffer_info->length,
@@ -424,7 +397,7 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter)
                        tx_buffer_info = &tx_ring->tx_buffer_info[i];
                        u0 = (struct my_u0 *)tx_desc;
                        pr_info("T [0x%03X]    %016llX %016llX %016llX"
-                               " %04X  %3X %016llX %p", i,
+                               " %04X  %p %016llX %p", i,
                                le64_to_cpu(u0->a),
                                le64_to_cpu(u0->b),
                                (u64)tx_buffer_info->dma,
@@ -643,27 +616,31 @@ static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
        }
 }
 
-void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *tx_ring,
-                                     struct ixgbe_tx_buffer *tx_buffer_info)
+static inline void ixgbe_unmap_tx_resource(struct ixgbe_ring *ring,
+                                          struct ixgbe_tx_buffer *tx_buffer)
 {
-       if (tx_buffer_info->dma) {
-               if (tx_buffer_info->mapped_as_page)
-                       dma_unmap_page(tx_ring->dev,
-                                      tx_buffer_info->dma,
-                                      tx_buffer_info->length,
-                                      DMA_TO_DEVICE);
+       if (tx_buffer->dma) {
+               if (tx_buffer->tx_flags & IXGBE_TX_FLAGS_MAPPED_AS_PAGE)
+                       dma_unmap_page(ring->dev,
+                                      tx_buffer->dma,
+                                      tx_buffer->length,
+                                      DMA_TO_DEVICE);
                else
-                       dma_unmap_single(tx_ring->dev,
-                                        tx_buffer_info->dma,
-                                        tx_buffer_info->length,
-                                        DMA_TO_DEVICE);
-               tx_buffer_info->dma = 0;
+                       dma_unmap_single(ring->dev,
+                                        tx_buffer->dma,
+                                        tx_buffer->length,
+                                        DMA_TO_DEVICE);
        }
-       if (tx_buffer_info->skb) {
+       tx_buffer->dma = 0;
+}
+
+void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *tx_ring,
+                                     struct ixgbe_tx_buffer *tx_buffer_info)
+{
+       ixgbe_unmap_tx_resource(tx_ring, tx_buffer_info);
+       if (tx_buffer_info->skb)
                dev_kfree_skb_any(tx_buffer_info->skb);
-               tx_buffer_info->skb = NULL;
-       }
-       tx_buffer_info->time_stamp = 0;
+       tx_buffer_info->skb = NULL;
        /* tx_buffer_info must be completely set up in the transmit path */
 }
 
@@ -797,56 +774,72 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
                               struct ixgbe_ring *tx_ring)
 {
        struct ixgbe_adapter *adapter = q_vector->adapter;
-       union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
-       struct ixgbe_tx_buffer *tx_buffer_info;
+       struct ixgbe_tx_buffer *tx_buffer;
+       union ixgbe_adv_tx_desc *tx_desc;
        unsigned int total_bytes = 0, total_packets = 0;
-       u16 i, eop, count = 0;
+       unsigned int budget = q_vector->tx.work_limit;
+       u16 i = tx_ring->next_to_clean;
 
-       i = tx_ring->next_to_clean;
-       eop = tx_ring->tx_buffer_info[i].next_to_watch;
-       eop_desc = IXGBE_TX_DESC_ADV(tx_ring, eop);
+       tx_buffer = &tx_ring->tx_buffer_info[i];
+       tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i);
 
-       while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) &&
-              (count < q_vector->tx.work_limit)) {
-               bool cleaned = false;
-               rmb(); /* read buffer_info after eop_desc */
-               for ( ; !cleaned; count++) {
-                       tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i);
-                       tx_buffer_info = &tx_ring->tx_buffer_info[i];
+       for (; budget; budget--) {
+               union ixgbe_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;
+
+               /* if next_to_watch is not set then there is no work pending */
+               if (!eop_desc)
+                       break;
+
+               /* if DD is not set, pending work has not been completed */
+               if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
+                       break;
 
+               /* count the packet as being completed */
+               tx_ring->tx_stats.completed++;
+
+               /* clear next_to_watch to prevent false hangs */
+               tx_buffer->next_to_watch = NULL;
+
+               /* prevent any other reads prior to eop_desc being verified */
+               rmb();
+
+               do {
+                       ixgbe_unmap_tx_resource(tx_ring, tx_buffer);
                        tx_desc->wb.status = 0;
-                       cleaned = (i == eop);
+                       if (likely(tx_desc == eop_desc)) {
+                               eop_desc = NULL;
+                               dev_kfree_skb_any(tx_buffer->skb);
+                               tx_buffer->skb = NULL;
 
+                               total_bytes += tx_buffer->bytecount;
+                               total_packets += tx_buffer->gso_segs;
+                       }
+
+                       tx_buffer++;
+                       tx_desc++;
                        i++;
-                       if (i == tx_ring->count)
+                       if (unlikely(i == tx_ring->count)) {
                                i = 0;
 
-                       if (cleaned && tx_buffer_info->skb) {
-                               total_bytes += tx_buffer_info->bytecount;
-                               total_packets += tx_buffer_info->gso_segs;
+                               tx_buffer = tx_ring->tx_buffer_info;
+                               tx_desc = IXGBE_TX_DESC_ADV(tx_ring, 0);
                        }
 
-                       ixgbe_unmap_and_free_tx_resource(tx_ring,
-                                                        tx_buffer_info);
-               }
-
-               tx_ring->tx_stats.completed++;
-               eop = tx_ring->tx_buffer_info[i].next_to_watch;
-               eop_desc = IXGBE_TX_DESC_ADV(tx_ring, eop);
+               } while (eop_desc);
        }
 
        tx_ring->next_to_clean = i;
+       u64_stats_update_begin(&tx_ring->syncp);
        tx_ring->stats.bytes += total_bytes;
        tx_ring->stats.packets += total_packets;
-       u64_stats_update_begin(&tx_ring->syncp);
+       u64_stats_update_end(&tx_ring->syncp);
        q_vector->tx.total_bytes += total_bytes;
        q_vector->tx.total_packets += total_packets;
-       u64_stats_update_end(&tx_ring->syncp);
 
        if (check_for_tx_hang(tx_ring) && ixgbe_check_tx_hang(tx_ring)) {
                /* schedule immediate reset if we believe we hung */
                struct ixgbe_hw *hw = &adapter->hw;
-               tx_desc = IXGBE_TX_DESC_ADV(tx_ring, eop);
+               tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i);
                e_err(drv, "Detected Tx Unit Hang\n"
                        "  Tx Queue             <%d>\n"
                        "  TDH, TDT             <%x>, <%x>\n"
@@ -858,8 +851,8 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
                        tx_ring->queue_index,
                        IXGBE_READ_REG(hw, IXGBE_TDH(tx_ring->reg_idx)),
                        IXGBE_READ_REG(hw, IXGBE_TDT(tx_ring->reg_idx)),
-                       tx_ring->next_to_use, eop,
-                       tx_ring->tx_buffer_info[eop].time_stamp, jiffies);
+                       tx_ring->next_to_use, i,
+                       tx_ring->tx_buffer_info[i].time_stamp, jiffies);
 
                netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
 
@@ -875,7 +868,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
        }
 
 #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
-       if (unlikely(count && netif_carrier_ok(tx_ring->netdev) &&
+       if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
                     (ixgbe_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
                /* Make sure that anybody stopping the queue after this
                 * sees the new next_to_clean.
@@ -888,7 +881,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
                }
        }
 
-       return count < q_vector->tx.work_limit;
+       return !!budget;
 }
 
 #ifdef CONFIG_IXGBE_DCA
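
The rewritten ixgbe_clean_tx_irq() above replaces the old eop index bookkeeping with a per-buffer next_to_watch descriptor pointer and an explicit budget taken from q_vector->tx.work_limit: the outer loop stops as soon as the budget is spent or the end-of-packet descriptor has not reported DD (descriptor done), and the inner loop then releases every descriptor of the completed packet. A stripped-down, stand-alone sketch of that pattern, using simplified stand-in types rather than the real ixgbe structures:

#include <stdbool.h>
#include <stddef.h>

#define STAT_DD	0x1			/* stand-in for IXGBE_TXD_STAT_DD */

struct tx_desc   { unsigned int wb_status; };
struct tx_buffer { struct tx_desc *next_to_watch; };	/* set by the xmit path */

/* returns true when the budget was not exhausted (queue fully cleaned) */
static bool clean_tx_ring(struct tx_buffer *buf, struct tx_desc *desc,
			  unsigned int count, unsigned int *next_to_clean,
			  unsigned int budget)
{
	unsigned int i = *next_to_clean;

	for (; budget; budget--) {
		struct tx_desc *eop = buf[i].next_to_watch;

		/* no pending packet, or hardware has not finished it yet */
		if (!eop || !(eop->wb_status & STAT_DD))
			break;

		buf[i].next_to_watch = NULL;	/* avoid false hang reports */

		/* release every descriptor of the packet, eop included */
		do {
			struct tx_desc *d = &desc[i];

			d->wb_status = 0;
			if (++i == count)
				i = 0;		/* wrap around the ring */
			if (d == eop)
				eop = NULL;	/* packet fully cleaned */
		} while (eop);
	}

	*next_to_clean = i;
	return !!budget;
}
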
@@ -904,12 +897,12 @@ static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
        switch (hw->mac.type) {
        case ixgbe_mac_82598EB:
                rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK;
-               rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
+               rxctrl |= dca3_get_tag(rx_ring->dev, cpu);
                break;
        case ixgbe_mac_82599EB:
        case ixgbe_mac_X540:
                rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK_82599;
-               rxctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) <<
+               rxctrl |= (dca3_get_tag(rx_ring->dev, cpu) <<
                           IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599);
                break;
        default:
@@ -933,7 +926,7 @@ static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
        case ixgbe_mac_82598EB:
                txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(reg_idx));
                txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK;
-               txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
+               txctrl |= dca3_get_tag(tx_ring->dev, cpu);
                txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
                IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(reg_idx), txctrl);
                break;
@@ -941,7 +934,7 @@ static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
        case ixgbe_mac_X540:
                txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(reg_idx));
                txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK_82599;
-               txctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) <<
+               txctrl |= (dca3_get_tag(tx_ring->dev, cpu) <<
                           IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599);
                txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
                IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(reg_idx), txctrl);
@@ -954,26 +947,17 @@ static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
 static void ixgbe_update_dca(struct ixgbe_q_vector *q_vector)
 {
        struct ixgbe_adapter *adapter = q_vector->adapter;
+       struct ixgbe_ring *ring;
        int cpu = get_cpu();
-       long r_idx;
-       int i;
 
        if (q_vector->cpu == cpu)
                goto out_no_update;
 
-       r_idx = find_first_bit(q_vector->tx.idx, adapter->num_tx_queues);
-       for (i = 0; i < q_vector->tx.count; i++) {
-               ixgbe_update_tx_dca(adapter, adapter->tx_ring[r_idx], cpu);
-               r_idx = find_next_bit(q_vector->tx.idx, adapter->num_tx_queues,
-                                     r_idx + 1);
-       }
+       for (ring = q_vector->tx.ring; ring != NULL; ring = ring->next)
+               ixgbe_update_tx_dca(adapter, ring, cpu);
 
-       r_idx = find_first_bit(q_vector->rx.idx, adapter->num_rx_queues);
-       for (i = 0; i < q_vector->rx.count; i++) {
-               ixgbe_update_rx_dca(adapter, adapter->rx_ring[r_idx], cpu);
-               r_idx = find_next_bit(q_vector->rx.idx, adapter->num_rx_queues,
-                                     r_idx + 1);
-       }
+       for (ring = q_vector->rx.ring; ring != NULL; ring = ring->next)
+               ixgbe_update_rx_dca(adapter, ring, cpu);
 
        q_vector->cpu = cpu;
 out_no_update:
@@ -1286,9 +1270,9 @@ static inline bool ixgbe_get_rsc_state(union ixgbe_adv_rx_desc *rx_desc)
                IXGBE_RXDADV_RSCCNT_MASK);
 }
 
-static void ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
+static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
                               struct ixgbe_ring *rx_ring,
-                              int *work_done, int work_to_do)
+                              int budget)
 {
        struct ixgbe_adapter *adapter = q_vector->adapter;
        union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
@@ -1468,11 +1452,11 @@ static void ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 #endif /* IXGBE_FCOE */
                ixgbe_receive_skb(q_vector, skb, staterr, rx_ring, rx_desc);
 
+               budget--;
 next_desc:
                rx_desc->wb.upper.status_error = 0;
 
-               (*work_done)++;
-               if (*work_done >= work_to_do)
+               if (!budget)
                        break;
 
                /* return some buffers to hardware, one at a time is too slow */
@@ -1513,9 +1497,10 @@ next_desc:
        u64_stats_update_end(&rx_ring->syncp);
        q_vector->rx.total_packets += total_rx_packets;
        q_vector->rx.total_bytes += total_rx_bytes;
+
+       return !!budget;
 }
 
-static int ixgbe_clean_rxonly(struct napi_struct *, int);
 /**
  * ixgbe_configure_msix - Configure MSI-X hardware
  * @adapter: board private structure
@@ -1526,61 +1511,39 @@ static int ixgbe_clean_rxonly(struct napi_struct *, int);
 static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
 {
        struct ixgbe_q_vector *q_vector;
-       int i, q_vectors, v_idx, r_idx;
+       int q_vectors, v_idx;
        u32 mask;
 
        q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
 
+       /* Populate MSIX to EITR Select */
+       if (adapter->num_vfs > 32) {
+               u32 eitrsel = (1 << (adapter->num_vfs - 32)) - 1;
+               IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, eitrsel);
+       }
+
        /*
         * Populate the IVAR table and set the ITR values to the
         * corresponding register.
         */
        for (v_idx = 0; v_idx < q_vectors; v_idx++) {
+               struct ixgbe_ring *ring;
                q_vector = adapter->q_vector[v_idx];
-               /* XXX for_each_set_bit(...) */
-               r_idx = find_first_bit(q_vector->rx.idx,
-                                      adapter->num_rx_queues);
-
-               for (i = 0; i < q_vector->rx.count; i++) {
-                       u8 reg_idx = adapter->rx_ring[r_idx]->reg_idx;
-                       ixgbe_set_ivar(adapter, 0, reg_idx, v_idx);
-                       r_idx = find_next_bit(q_vector->rx.idx,
-                                             adapter->num_rx_queues,
-                                             r_idx + 1);
-               }
-               r_idx = find_first_bit(q_vector->tx.idx,
-                                      adapter->num_tx_queues);
-
-               for (i = 0; i < q_vector->tx.count; i++) {
-                       u8 reg_idx = adapter->tx_ring[r_idx]->reg_idx;
-                       ixgbe_set_ivar(adapter, 1, reg_idx, v_idx);
-                       r_idx = find_next_bit(q_vector->tx.idx,
-                                             adapter->num_tx_queues,
-                                             r_idx + 1);
-               }
 
-               if (q_vector->tx.count && !q_vector->rx.count)
+               for (ring = q_vector->rx.ring; ring != NULL; ring = ring->next)
+                       ixgbe_set_ivar(adapter, 0, ring->reg_idx, v_idx);
+
+               for (ring = q_vector->tx.ring; ring != NULL; ring = ring->next)
+                       ixgbe_set_ivar(adapter, 1, ring->reg_idx, v_idx);
+
+               if (q_vector->tx.ring && !q_vector->rx.ring)
                        /* tx only */
                        q_vector->eitr = adapter->tx_eitr_param;
-               else if (q_vector->rx.count)
+               else if (q_vector->rx.ring)
                        /* rx or mixed */
                        q_vector->eitr = adapter->rx_eitr_param;
 
                ixgbe_write_eitr(q_vector);
-               /* If ATR is enabled, set interrupt affinity */
-               if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
-                       /*
-                        * Allocate the affinity_hint cpumask, assign the mask
-                        * for this vector, and set our affinity_hint for
-                        * this irq.
-                        */
-                       if (!alloc_cpumask_var(&q_vector->affinity_mask,
-                                              GFP_KERNEL))
-                               return;
-                       cpumask_set_cpu(v_idx, q_vector->affinity_mask);
-                       irq_set_affinity_hint(adapter->msix_entries[v_idx].vector,
-                                             q_vector->affinity_mask);
-               }
        }
 
        switch (adapter->hw.mac.type) {
@@ -1865,72 +1828,6 @@ static void ixgbe_check_lsc(struct ixgbe_adapter *adapter)
        }
 }
 
-static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
-{
-       struct ixgbe_adapter *adapter = data;
-       struct ixgbe_hw *hw = &adapter->hw;
-       u32 eicr;
-
-       /*
-        * Workaround for Silicon errata.  Use clear-by-write instead
-        * of clear-by-read.  Reading with EICS will return the
-        * interrupt causes without clearing, which later be done
-        * with the write to EICR.
-        */
-       eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
-       IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
-
-       if (eicr & IXGBE_EICR_LSC)
-               ixgbe_check_lsc(adapter);
-
-       if (eicr & IXGBE_EICR_MAILBOX)
-               ixgbe_msg_task(adapter);
-
-       switch (hw->mac.type) {
-       case ixgbe_mac_82599EB:
-       case ixgbe_mac_X540:
-               /* Handle Flow Director Full threshold interrupt */
-               if (eicr & IXGBE_EICR_FLOW_DIR) {
-                       int reinit_count = 0;
-                       int i;
-                       for (i = 0; i < adapter->num_tx_queues; i++) {
-                               struct ixgbe_ring *ring = adapter->tx_ring[i];
-                               if (test_and_clear_bit(__IXGBE_TX_FDIR_INIT_DONE,
-                                                      &ring->state))
-                                       reinit_count++;
-                       }
-                       if (reinit_count) {
-                               /* no more flow director interrupts until after init */
-                               IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_FLOW_DIR);
-                               eicr &= ~IXGBE_EICR_FLOW_DIR;
-                               adapter->flags2 |= IXGBE_FLAG2_FDIR_REQUIRES_REINIT;
-                               ixgbe_service_event_schedule(adapter);
-                       }
-               }
-               ixgbe_check_sfp_event(adapter, eicr);
-               if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) &&
-                   ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC))) {
-                       if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
-                               adapter->interrupt_event = eicr;
-                               adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_EVENT;
-                               ixgbe_service_event_schedule(adapter);
-                       }
-               }
-               break;
-       default:
-               break;
-       }
-
-       ixgbe_check_fan_failure(adapter, eicr);
-
-       /* re-enable the original interrupt state, no lsc, no queues */
-       if (!test_bit(__IXGBE_DOWN, &adapter->state))
-               IXGBE_WRITE_REG(hw, IXGBE_EIMS, eicr &
-                               ~(IXGBE_EIMS_LSC | IXGBE_EIMS_RTX_QUEUE));
-
-       return IRQ_HANDLED;
-}
-
 static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter,
                                           u64 qmask)
 {
@@ -1983,232 +1880,122 @@ static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter,
        /* skip the flush */
 }
 
-static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data)
-{
-       struct ixgbe_q_vector *q_vector = data;
-       struct ixgbe_adapter  *adapter = q_vector->adapter;
-       struct ixgbe_ring     *tx_ring;
-       int i, r_idx;
-
-       if (!q_vector->tx.count)
-               return IRQ_HANDLED;
-
-       r_idx = find_first_bit(q_vector->tx.idx, adapter->num_tx_queues);
-       for (i = 0; i < q_vector->tx.count; i++) {
-               tx_ring = adapter->tx_ring[r_idx];
-               r_idx = find_next_bit(q_vector->tx.idx, adapter->num_tx_queues,
-                                     r_idx + 1);
-       }
-
-       /* EIAM disabled interrupts (on this vector) for us */
-       napi_schedule(&q_vector->napi);
-
-       return IRQ_HANDLED;
-}
-
 /**
- * ixgbe_msix_clean_rx - single unshared vector rx clean (all queues)
- * @irq: unused
- * @data: pointer to our q_vector struct for this interrupt vector
+ * ixgbe_irq_enable - Enable default interrupt generation settings
+ * @adapter: board private structure
  **/
-static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data)
-{
-       struct ixgbe_q_vector *q_vector = data;
-       struct ixgbe_adapter  *adapter = q_vector->adapter;
-       struct ixgbe_ring  *rx_ring;
-       int r_idx;
-       int i;
-
-#ifdef CONFIG_IXGBE_DCA
-       if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
-               ixgbe_update_dca(q_vector);
-#endif
-
-       r_idx = find_first_bit(q_vector->rx.idx, adapter->num_rx_queues);
-       for (i = 0; i < q_vector->rx.count; i++) {
-               rx_ring = adapter->rx_ring[r_idx];
-               r_idx = find_next_bit(q_vector->rx.idx, adapter->num_rx_queues,
-                                     r_idx + 1);
-       }
-
-       if (!q_vector->rx.count)
-               return IRQ_HANDLED;
-
-       /* EIAM disabled interrupts (on this vector) for us */
-       napi_schedule(&q_vector->napi);
-
-       return IRQ_HANDLED;
-}
-
-static irqreturn_t ixgbe_msix_clean_many(int irq, void *data)
+static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
+                                   bool flush)
 {
-       struct ixgbe_q_vector *q_vector = data;
-       struct ixgbe_adapter  *adapter = q_vector->adapter;
-       struct ixgbe_ring  *ring;
-       int r_idx;
-       int i;
+       u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
 
-       if (!q_vector->tx.count && !q_vector->rx.count)
-               return IRQ_HANDLED;
-
-       r_idx = find_first_bit(q_vector->tx.idx, adapter->num_tx_queues);
-       for (i = 0; i < q_vector->tx.count; i++) {
-               ring = adapter->tx_ring[r_idx];
-               r_idx = find_next_bit(q_vector->tx.idx, adapter->num_tx_queues,
-                                     r_idx + 1);
-       }
+       /* don't reenable LSC while waiting for link */
+       if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE)
+               mask &= ~IXGBE_EIMS_LSC;
 
-       r_idx = find_first_bit(q_vector->rx.idx, adapter->num_rx_queues);
-       for (i = 0; i < q_vector->rx.count; i++) {
-               ring = adapter->rx_ring[r_idx];
-               r_idx = find_next_bit(q_vector->rx.idx, adapter->num_rx_queues,
-                                     r_idx + 1);
+       if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE)
+               mask |= IXGBE_EIMS_GPI_SDP0;
+       if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
+               mask |= IXGBE_EIMS_GPI_SDP1;
+       switch (adapter->hw.mac.type) {
+       case ixgbe_mac_82599EB:
+       case ixgbe_mac_X540:
+               mask |= IXGBE_EIMS_ECC;
+               mask |= IXGBE_EIMS_GPI_SDP1;
+               mask |= IXGBE_EIMS_GPI_SDP2;
+               mask |= IXGBE_EIMS_MAILBOX;
+               break;
+       default:
+               break;
        }
+       if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) &&
+           !(adapter->flags2 & IXGBE_FLAG2_FDIR_REQUIRES_REINIT))
+               mask |= IXGBE_EIMS_FLOW_DIR;
 
-       /* EIAM disabled interrupts (on this vector) for us */
-       napi_schedule(&q_vector->napi);
-
-       return IRQ_HANDLED;
+       IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
+       if (queues)
+               ixgbe_irq_enable_queues(adapter, ~0);
+       if (flush)
+               IXGBE_WRITE_FLUSH(&adapter->hw);
 }
 
-/**
- * ixgbe_clean_rxonly - msix (aka one shot) rx clean routine
- * @napi: napi struct with our devices info in it
- * @budget: amount of work driver is allowed to do this pass, in packets
- *
- * This function is optimized for cleaning one queue only on a single
- * q_vector!!!
- **/
-static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget)
+static irqreturn_t ixgbe_msix_other(int irq, void *data)
 {
-       struct ixgbe_q_vector *q_vector =
-                              container_of(napi, struct ixgbe_q_vector, napi);
-       struct ixgbe_adapter *adapter = q_vector->adapter;
-       struct ixgbe_ring *rx_ring = NULL;
-       int work_done = 0;
-       long r_idx;
+       struct ixgbe_adapter *adapter = data;
+       struct ixgbe_hw *hw = &adapter->hw;
+       u32 eicr;
 
-#ifdef CONFIG_IXGBE_DCA
-       if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
-               ixgbe_update_dca(q_vector);
-#endif
+       /*
+        * Workaround for Silicon errata.  Use clear-by-write instead
+        * of clear-by-read.  Reading with EICS will return the
+        * interrupt causes without clearing, which will later be done
+        * with the write to EICR.
+        */
+       eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
+       IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
 
-       r_idx = find_first_bit(q_vector->rx.idx, adapter->num_rx_queues);
-       rx_ring = adapter->rx_ring[r_idx];
+       if (eicr & IXGBE_EICR_LSC)
+               ixgbe_check_lsc(adapter);
 
-       ixgbe_clean_rx_irq(q_vector, rx_ring, &work_done, budget);
+       if (eicr & IXGBE_EICR_MAILBOX)
+               ixgbe_msg_task(adapter);
 
-       /* If all Rx work done, exit the polling mode */
-       if (work_done < budget) {
-               napi_complete(napi);
-               if (adapter->rx_itr_setting & 1)
-                       ixgbe_set_itr(q_vector);
-               if (!test_bit(__IXGBE_DOWN, &adapter->state))
-                       ixgbe_irq_enable_queues(adapter,
-                                               ((u64)1 << q_vector->v_idx));
+       switch (hw->mac.type) {
+       case ixgbe_mac_82599EB:
+       case ixgbe_mac_X540:
+               if (eicr & IXGBE_EICR_ECC)
+                       e_info(link, "Received unrecoverable ECC Err, please "
+                              "reboot\n");
+               /* Handle Flow Director Full threshold interrupt */
+               if (eicr & IXGBE_EICR_FLOW_DIR) {
+                       int reinit_count = 0;
+                       int i;
+                       for (i = 0; i < adapter->num_tx_queues; i++) {
+                               struct ixgbe_ring *ring = adapter->tx_ring[i];
+                               if (test_and_clear_bit(__IXGBE_TX_FDIR_INIT_DONE,
+                                                      &ring->state))
+                                       reinit_count++;
+                       }
+                       if (reinit_count) {
+                               /* no more flow director interrupts until after init */
+                               IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_FLOW_DIR);
+                               adapter->flags2 |= IXGBE_FLAG2_FDIR_REQUIRES_REINIT;
+                               ixgbe_service_event_schedule(adapter);
+                       }
+               }
+               ixgbe_check_sfp_event(adapter, eicr);
+               if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) &&
+                   ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC))) {
+                       if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
+                               adapter->interrupt_event = eicr;
+                               adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_EVENT;
+                               ixgbe_service_event_schedule(adapter);
+                       }
+               }
+               break;
+       default:
+               break;
        }
 
-       return work_done;
-}
-
-/**
- * ixgbe_clean_rxtx_many - msix (aka one shot) rx clean routine
- * @napi: napi struct with our devices info in it
- * @budget: amount of work driver is allowed to do this pass, in packets
- *
- * This function will clean more than one rx queue associated with a
- * q_vector.
- **/
-static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget)
-{
-       struct ixgbe_q_vector *q_vector =
-                              container_of(napi, struct ixgbe_q_vector, napi);
-       struct ixgbe_adapter *adapter = q_vector->adapter;
-       struct ixgbe_ring *ring = NULL;
-       int work_done = 0, i;
-       long r_idx;
-       bool tx_clean_complete = true;
-
-#ifdef CONFIG_IXGBE_DCA
-       if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
-               ixgbe_update_dca(q_vector);
-#endif
-
-       r_idx = find_first_bit(q_vector->tx.idx, adapter->num_tx_queues);
-       for (i = 0; i < q_vector->tx.count; i++) {
-               ring = adapter->tx_ring[r_idx];
-               tx_clean_complete &= ixgbe_clean_tx_irq(q_vector, ring);
-               r_idx = find_next_bit(q_vector->tx.idx, adapter->num_tx_queues,
-                                     r_idx + 1);
-       }
+       ixgbe_check_fan_failure(adapter, eicr);
 
-       /* attempt to distribute budget to each queue fairly, but don't allow
-        * the budget to go below 1 because we'll exit polling */
-       budget /= (q_vector->rx.count ?: 1);
-       budget = max(budget, 1);
-       r_idx = find_first_bit(q_vector->rx.idx, adapter->num_rx_queues);
-       for (i = 0; i < q_vector->rx.count; i++) {
-               ring = adapter->rx_ring[r_idx];
-               ixgbe_clean_rx_irq(q_vector, ring, &work_done, budget);
-               r_idx = find_next_bit(q_vector->rx.idx, adapter->num_rx_queues,
-                                     r_idx + 1);
-       }
-
-       r_idx = find_first_bit(q_vector->rx.idx, adapter->num_rx_queues);
-       ring = adapter->rx_ring[r_idx];
-       /* If all Rx work done, exit the polling mode */
-       if (work_done < budget) {
-               napi_complete(napi);
-               if (adapter->rx_itr_setting & 1)
-                       ixgbe_set_itr(q_vector);
-               if (!test_bit(__IXGBE_DOWN, &adapter->state))
-                       ixgbe_irq_enable_queues(adapter,
-                                               ((u64)1 << q_vector->v_idx));
-               return 0;
-       }
+       /* re-enable the original interrupt state, no lsc, no queues */
+       if (!test_bit(__IXGBE_DOWN, &adapter->state))
+               ixgbe_irq_enable(adapter, false, false);
 
-       return work_done;
+       return IRQ_HANDLED;
 }
 
-/**
- * ixgbe_clean_txonly - msix (aka one shot) tx clean routine
- * @napi: napi struct with our devices info in it
- * @budget: amount of work driver is allowed to do this pass, in packets
- *
- * This function is optimized for cleaning one queue only on a single
- * q_vector!!!
- **/
-static int ixgbe_clean_txonly(struct napi_struct *napi, int budget)
+static irqreturn_t ixgbe_msix_clean_rings(int irq, void *data)
 {
-       struct ixgbe_q_vector *q_vector =
-                              container_of(napi, struct ixgbe_q_vector, napi);
-       struct ixgbe_adapter *adapter = q_vector->adapter;
-       struct ixgbe_ring *tx_ring = NULL;
-       int work_done = 0;
-       long r_idx;
-
-#ifdef CONFIG_IXGBE_DCA
-       if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
-               ixgbe_update_dca(q_vector);
-#endif
-
-       r_idx = find_first_bit(q_vector->tx.idx, adapter->num_tx_queues);
-       tx_ring = adapter->tx_ring[r_idx];
+       struct ixgbe_q_vector *q_vector = data;
 
-       if (!ixgbe_clean_tx_irq(q_vector, tx_ring))
-               work_done = budget;
+       /* EIAM disabled interrupts (on this vector) for us */
 
-       /* If all Tx work done, exit the polling mode */
-       if (work_done < budget) {
-               napi_complete(napi);
-               if (adapter->tx_itr_setting & 1)
-                       ixgbe_set_itr(q_vector);
-               if (!test_bit(__IXGBE_DOWN, &adapter->state))
-                       ixgbe_irq_enable_queues(adapter,
-                                               ((u64)1 << q_vector->v_idx));
-       }
+       if (q_vector->rx.ring || q_vector->tx.ring)
+               napi_schedule(&q_vector->napi);
 
-       return work_done;
+       return IRQ_HANDLED;
 }
 
 static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx,
@@ -2217,9 +2004,10 @@ static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx,
        struct ixgbe_q_vector *q_vector = a->q_vector[v_idx];
        struct ixgbe_ring *rx_ring = a->rx_ring[r_idx];
 
-       set_bit(r_idx, q_vector->rx.idx);
-       q_vector->rx.count++;
        rx_ring->q_vector = q_vector;
+       rx_ring->next = q_vector->rx.ring;
+       q_vector->rx.ring = rx_ring;
+       q_vector->rx.count++;
 }
 
 static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx,
@@ -2228,9 +2016,10 @@ static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx,
        struct ixgbe_q_vector *q_vector = a->q_vector[v_idx];
        struct ixgbe_ring *tx_ring = a->tx_ring[t_idx];
 
-       set_bit(t_idx, q_vector->tx.idx);
-       q_vector->tx.count++;
        tx_ring->q_vector = q_vector;
+       tx_ring->next = q_vector->tx.ring;
+       q_vector->tx.ring = tx_ring;
+       q_vector->tx.count++;
        q_vector->tx.work_limit = a->tx_work_limit;
 }
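
With this change each ring is pushed onto a singly linked list hanging off its q_vector (ring->next points at the previous head), and the rest of the patch walks that list instead of scanning the old rx.idx/tx.idx bitmaps with find_first_bit()/find_next_bit(). A minimal illustration of the push-and-iterate pattern, with simplified stand-in types:

#include <stddef.h>

struct ring {
	int reg_idx;
	struct ring *next;		/* next ring owned by the same vector */
};

struct ring_container {
	struct ring *ring;		/* head of the singly linked list */
	unsigned int count;
};

/* head insertion, as done by map_vector_to_rxq()/map_vector_to_txq() above */
static void container_add(struct ring_container *rc, struct ring *r)
{
	r->next = rc->ring;
	rc->ring = r;
	rc->count++;
}

/* traversal, as used by ixgbe_update_dca(), ixgbe_configure_msix() and ixgbe_poll() */
static int container_sum_reg_idx(const struct ring_container *rc)
{
	const struct ring *r;
	int sum = 0;

	for (r = rc->ring; r != NULL; r = r->next)
		sum += r->reg_idx;
	return sum;
}
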
 
@@ -2244,59 +2033,41 @@ static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx,
  * group the rings as "efficiently" as possible.  You would add new
  * mapping configurations in here.
  **/
-static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter)
+static void ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter)
 {
-       int q_vectors;
+       int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+       int rxr_remaining = adapter->num_rx_queues, rxr_idx = 0;
+       int txr_remaining = adapter->num_tx_queues, txr_idx = 0;
        int v_start = 0;
-       int rxr_idx = 0, txr_idx = 0;
-       int rxr_remaining = adapter->num_rx_queues;
-       int txr_remaining = adapter->num_tx_queues;
-       int i, j;
-       int rqpv, tqpv;
-       int err = 0;
 
-       /* No mapping required if MSI-X is disabled. */
+       /* only one q_vector if MSI-X is disabled. */
        if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
-               goto out;
-
-       q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+               q_vectors = 1;
 
        /*
-        * The ideal configuration...
-        * We have enough vectors to map one per queue.
+        * If we don't have enough vectors for a 1-to-1 mapping, we'll have to
+        * group them so there are multiple queues per vector.
+        *
+        * Re-adjusting *qpv takes care of the remainder.
         */
-       if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
-               for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
+       for (; v_start < q_vectors && rxr_remaining; v_start++) {
+               int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_start);
+               for (; rqpv; rqpv--, rxr_idx++, rxr_remaining--)
                        map_vector_to_rxq(adapter, v_start, rxr_idx);
-
-               for (; txr_idx < txr_remaining; v_start++, txr_idx++)
-                       map_vector_to_txq(adapter, v_start, txr_idx);
-
-               goto out;
        }
 
        /*
-        * If we don't have enough vectors for a 1-to-1
-        * mapping, we'll have to group them so there are
-        * multiple queues per vector.
+        * If there are not enough q_vectors for each ring to have its own
+        * vector then we must pair up Rx/Tx on each vector
         */
-       /* Re-adjusting *qpv takes care of the remainder. */
-       for (i = v_start; i < q_vectors; i++) {
-               rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i);
-               for (j = 0; j < rqpv; j++) {
-                       map_vector_to_rxq(adapter, i, rxr_idx);
-                       rxr_idx++;
-                       rxr_remaining--;
-               }
-               tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i);
-               for (j = 0; j < tqpv; j++) {
-                       map_vector_to_txq(adapter, i, txr_idx);
-                       txr_idx++;
-                       txr_remaining--;
-               }
+       if ((v_start + txr_remaining) > q_vectors)
+               v_start = 0;
+
+       for (; v_start < q_vectors && txr_remaining; v_start++) {
+               int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_start);
+               for (; tqpv; tqpv--, txr_idx++, txr_remaining--)
+                       map_vector_to_txq(adapter, v_start, txr_idx);
        }
-out:
-       return err;
 }
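
The DIV_ROUND_UP() loops above hand out the remaining rings as evenly as possible; for example, 10 Rx rings spread over 4 vectors come out as 3, 3, 2 and 2. A small stand-alone sketch of that distribution arithmetic (hypothetical helper, not driver code):

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/* show how 'rings' queues are spread across 'vectors' interrupt vectors */
static void show_distribution(int rings, int vectors)
{
	int remaining = rings, v;

	for (v = 0; v < vectors && remaining; v++) {
		int per_vector = DIV_ROUND_UP(remaining, vectors - v);

		printf("vector %d gets %d ring(s)\n", v, per_vector);
		remaining -= per_vector;
	}
}

int main(void)
{
	show_distribution(10, 4);	/* prints 3, 3, 2, 2 */
	return 0;
}
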
 
 /**
@@ -2309,110 +2080,65 @@ out:
 static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
 {
        struct net_device *netdev = adapter->netdev;
-       irqreturn_t (*handler)(int, void *);
-       int i, vector, q_vectors, err;
+       int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+       int vector, err;
        int ri = 0, ti = 0;
 
-       /* Decrement for Other and TCP Timer vectors */
-       q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
-
-       err = ixgbe_map_rings_to_vectors(adapter);
-       if (err)
-               return err;
-
-#define SET_HANDLER(_v) (((_v)->rx.count && (_v)->tx.count)        \
-                                         ? &ixgbe_msix_clean_many : \
-                         (_v)->rx.count ? &ixgbe_msix_clean_rx   : \
-                         (_v)->tx.count ? &ixgbe_msix_clean_tx   : \
-                         NULL)
        for (vector = 0; vector < q_vectors; vector++) {
                struct ixgbe_q_vector *q_vector = adapter->q_vector[vector];
-               handler = SET_HANDLER(q_vector);
+               struct msix_entry *entry = &adapter->msix_entries[vector];
 
-               if (handler == &ixgbe_msix_clean_rx) {
+               if (q_vector->tx.ring && q_vector->rx.ring) {
                        snprintf(q_vector->name, sizeof(q_vector->name) - 1,
-                                "%s-%s-%d", netdev->name, "rx", ri++);
-               } else if (handler == &ixgbe_msix_clean_tx) {
+                                "%s-%s-%d", netdev->name, "TxRx", ri++);
+                       ti++;
+               } else if (q_vector->rx.ring) {
                        snprintf(q_vector->name, sizeof(q_vector->name) - 1,
-                                "%s-%s-%d", netdev->name, "tx", ti++);
-               } else if (handler == &ixgbe_msix_clean_many) {
+                                "%s-%s-%d", netdev->name, "rx", ri++);
+               } else if (q_vector->tx.ring) {
                        snprintf(q_vector->name, sizeof(q_vector->name) - 1,
-                                "%s-%s-%d", netdev->name, "TxRx", ri++);
-                       ti++;
+                                "%s-%s-%d", netdev->name, "tx", ti++);
                } else {
                        /* skip this unused q_vector */
                        continue;
                }
-               err = request_irq(adapter->msix_entries[vector].vector,
-                                 handler, 0, q_vector->name,
-                                 q_vector);
+               err = request_irq(entry->vector, &ixgbe_msix_clean_rings, 0,
+                                 q_vector->name, q_vector);
                if (err) {
                        e_err(probe, "request_irq failed for MSIX interrupt "
                              "Error: %d\n", err);
                        goto free_queue_irqs;
                }
+               /* If Flow Director is enabled, set interrupt affinity */
+               if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
+                       /* assign the mask for this irq */
+                       irq_set_affinity_hint(entry->vector,
+                                             q_vector->affinity_mask);
+               }
        }
 
-       sprintf(adapter->lsc_int_name, "%s:lsc", netdev->name);
-       err = request_irq(adapter->msix_entries[vector].vector,
-                         ixgbe_msix_lsc, 0, adapter->lsc_int_name, adapter);
-       if (err) {
-               e_err(probe, "request_irq for msix_lsc failed: %d\n", err);
-               goto free_queue_irqs;
-       }
-
-       return 0;
-
-free_queue_irqs:
-       for (i = vector - 1; i >= 0; i--)
-               free_irq(adapter->msix_entries[--vector].vector,
-                        adapter->q_vector[i]);
-       adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
-       pci_disable_msix(adapter->pdev);
-       kfree(adapter->msix_entries);
-       adapter->msix_entries = NULL;
-       return err;
-}
-
-/**
- * ixgbe_irq_enable - Enable default interrupt generation settings
- * @adapter: board private structure
- **/
-static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
-                                   bool flush)
-{
-       u32 mask;
-
-       mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
-       if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE)
-               mask |= IXGBE_EIMS_GPI_SDP0;
-       if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
-               mask |= IXGBE_EIMS_GPI_SDP1;
-       switch (adapter->hw.mac.type) {
-       case ixgbe_mac_82599EB:
-       case ixgbe_mac_X540:
-               mask |= IXGBE_EIMS_ECC;
-               mask |= IXGBE_EIMS_GPI_SDP1;
-               mask |= IXGBE_EIMS_GPI_SDP2;
-               if (adapter->num_vfs)
-                       mask |= IXGBE_EIMS_MAILBOX;
-               break;
-       default:
-               break;
+       err = request_irq(adapter->msix_entries[vector].vector,
+                         ixgbe_msix_other, 0, netdev->name, adapter);
+       if (err) {
+               e_err(probe, "request_irq for msix_lsc failed: %d\n", err);
+               goto free_queue_irqs;
        }
-       if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)
-               mask |= IXGBE_EIMS_FLOW_DIR;
 
-       IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
-       if (queues)
-               ixgbe_irq_enable_queues(adapter, ~0);
-       if (flush)
-               IXGBE_WRITE_FLUSH(&adapter->hw);
+       return 0;
 
-       if (adapter->num_vfs > 32) {
-               u32 eitrsel = (1 << (adapter->num_vfs - 32)) - 1;
-               IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, eitrsel);
+free_queue_irqs:
+       while (vector) {
+               vector--;
+               irq_set_affinity_hint(adapter->msix_entries[vector].vector,
+                                     NULL);
+               free_irq(adapter->msix_entries[vector].vector,
+                        adapter->q_vector[vector]);
        }
+       adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
+       pci_disable_msix(adapter->pdev);
+       kfree(adapter->msix_entries);
+       adapter->msix_entries = NULL;
+       return err;
 }
 
 /**
@@ -2488,14 +2214,26 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
 
 static inline void ixgbe_reset_q_vectors(struct ixgbe_adapter *adapter)
 {
-       int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+       int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+       int i;
+
+       /* legacy and MSI only use one vector */
+       if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
+               q_vectors = 1;
+
+       for (i = 0; i < adapter->num_rx_queues; i++) {
+               adapter->rx_ring[i]->q_vector = NULL;
+               adapter->rx_ring[i]->next = NULL;
+       }
+       for (i = 0; i < adapter->num_tx_queues; i++) {
+               adapter->tx_ring[i]->q_vector = NULL;
+               adapter->tx_ring[i]->next = NULL;
+       }
 
        for (i = 0; i < q_vectors; i++) {
                struct ixgbe_q_vector *q_vector = adapter->q_vector[i];
-               bitmap_zero(q_vector->rx.idx, MAX_RX_QUEUES);
-               bitmap_zero(q_vector->tx.idx, MAX_TX_QUEUES);
-               q_vector->rx.count = 0;
-               q_vector->tx.count = 0;
+               memset(&q_vector->rx, 0, sizeof(struct ixgbe_ring_container));
+               memset(&q_vector->tx, 0, sizeof(struct ixgbe_ring_container));
        }
 }
 
@@ -2511,19 +2249,25 @@ static int ixgbe_request_irq(struct ixgbe_adapter *adapter)
        struct net_device *netdev = adapter->netdev;
        int err;
 
-       if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
+       /* map all of the rings to the q_vectors */
+       ixgbe_map_rings_to_vectors(adapter);
+
+       if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
                err = ixgbe_request_msix_irqs(adapter);
-       } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
+       else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED)
                err = request_irq(adapter->pdev->irq, ixgbe_intr, 0,
                                  netdev->name, adapter);
-       } else {
+       else
                err = request_irq(adapter->pdev->irq, ixgbe_intr, IRQF_SHARED,
                                  netdev->name, adapter);
-       }
 
-       if (err)
+       if (err) {
                e_err(probe, "request_irq failed, Error %d\n", err);
 
+               /* place q_vectors and rings back into a known good state */
+               ixgbe_reset_q_vectors(adapter);
+       }
+
        return err;
 }
 
@@ -2533,25 +2277,29 @@ static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
                int i, q_vectors;
 
                q_vectors = adapter->num_msix_vectors;
-
                i = q_vectors - 1;
                free_irq(adapter->msix_entries[i].vector, adapter);
-
                i--;
+
                for (; i >= 0; i--) {
                        /* free only the irqs that were actually requested */
-                       if (!adapter->q_vector[i]->rx.count &&
-                           !adapter->q_vector[i]->tx.count)
+                       if (!adapter->q_vector[i]->rx.ring &&
+                           !adapter->q_vector[i]->tx.ring)
                                continue;
 
+                       /* clear the affinity_mask in the IRQ descriptor */
+                       irq_set_affinity_hint(adapter->msix_entries[i].vector,
+                                             NULL);
+
                        free_irq(adapter->msix_entries[i].vector,
                                 adapter->q_vector[i]);
                }
-
-               ixgbe_reset_q_vectors(adapter);
        } else {
                free_irq(adapter->pdev->irq, adapter);
        }
+
+       /* clear q_vector state information */
+       ixgbe_reset_q_vectors(adapter);
 }
 
 /**
@@ -2569,8 +2317,6 @@ static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
-               if (adapter->num_vfs > 32)
-                       IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, 0);
                break;
        default:
                break;
@@ -2599,9 +2345,6 @@ static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter)
        ixgbe_set_ivar(adapter, 0, 0, 0);
        ixgbe_set_ivar(adapter, 1, 0, 0);
 
-       map_vector_to_rxq(adapter, 0, 0);
-       map_vector_to_txq(adapter, 0, 0);
-
        e_info(hw, "Legacy interrupt IVAR setup done\n");
 }
 
@@ -2618,13 +2361,11 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
        struct ixgbe_hw *hw = &adapter->hw;
        u64 tdba = ring->dma;
        int wait_loop = 10;
-       u32 txdctl;
+       u32 txdctl = IXGBE_TXDCTL_ENABLE;
        u8 reg_idx = ring->reg_idx;
 
        /* disable queue to avoid issues while updating state */
-       txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
-       IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx),
-                       txdctl & ~IXGBE_TXDCTL_ENABLE);
+       IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), 0);
        IXGBE_WRITE_FLUSH(hw);
 
        IXGBE_WRITE_REG(hw, IXGBE_TDBAL(reg_idx),
@@ -2636,18 +2377,22 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
        IXGBE_WRITE_REG(hw, IXGBE_TDT(reg_idx), 0);
        ring->tail = hw->hw_addr + IXGBE_TDT(reg_idx);
 
-       /* configure fetching thresholds */
-       if (adapter->rx_itr_setting == 0) {
-               /* cannot set wthresh when itr==0 */
-               txdctl &= ~0x007F0000;
-       } else {
-               /* enable WTHRESH=8 descriptors, to encourage burst writeback */
-               txdctl |= (8 << 16);
-       }
-       if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
-               /* PThresh workaround for Tx hang with DFP enabled. */
-               txdctl |= 32;
-       }
+       /*
+        * set WTHRESH to encourage burst writeback; it should not be set
+        * higher than 1 when ITR is 0 as it could cause false TX hangs
+        *
+        * In order to avoid issues, WTHRESH + PTHRESH should always be less
+        * than or equal to the number of on-chip descriptors, which is
+        * currently 40.
+        */
+       if (!adapter->tx_itr_setting || !adapter->rx_itr_setting)
+               txdctl |= (1 << 16);    /* WTHRESH = 1 */
+       else
+               txdctl |= (8 << 16);    /* WTHRESH = 8 */
+
+       /* PTHRESH=32 is needed to avoid a Tx hang with DFP enabled. */
+       txdctl |= (1 << 8) |    /* HTHRESH = 1 */
+                  32;          /* PTHRESH = 32 */
 
        /* reinitialize flowdirector state */
        if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) &&
@@ -2662,7 +2407,6 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
        clear_bit(__IXGBE_HANG_CHECK_ARMED, &ring->state);
 
        /* enable queue */
-       txdctl |= IXGBE_TXDCTL_ENABLE;
        IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), txdctl);
 
        /* TXDCTL.EN will return 0 on 82598 if link is down, so skip it */
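
As the threshold comment above explains, PTHRESH, HTHRESH and WTHRESH are packed into TXDCTL together with the queue enable bit (PTHRESH in the low bits, HTHRESH at bit 8, WTHRESH at bit 16, matching the shifts used above), and WTHRESH + PTHRESH = 8 + 32 stays within the 40 on-chip descriptors. A hedged, stand-alone sketch of that register composition (the enable-bit value is an assumption meant to mirror IXGBE_TXDCTL_ENABLE):

#include <stdio.h>

#define TXDCTL_ENABLE	(1u << 25)	/* assumed to mirror IXGBE_TXDCTL_ENABLE */

/* compose TXDCTL the way ixgbe_configure_tx_ring() does above */
static unsigned int make_txdctl(unsigned int pthresh, unsigned int hthresh,
				unsigned int wthresh)
{
	return TXDCTL_ENABLE |
	       (wthresh << 16) |	/* WTHRESH: write-back threshold */
	       (hthresh << 8)  |	/* HTHRESH: host threshold       */
	       pthresh;			/* PTHRESH: prefetch threshold   */
}

int main(void)
{
	/* WTHRESH + PTHRESH = 8 + 32 = 40 on-chip descriptors */
	printf("TXDCTL = 0x%08x\n", make_txdctl(32, 1, 8));
	return 0;
}
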
@@ -3534,19 +3278,8 @@ static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
                q_vectors = 1;
 
        for (q_idx = 0; q_idx < q_vectors; q_idx++) {
-               struct napi_struct *napi;
                q_vector = adapter->q_vector[q_idx];
-               napi = &q_vector->napi;
-               if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
-                       if (!q_vector->rx.count || !q_vector->tx.count) {
-                               if (q_vector->tx.count == 1)
-                                       napi->poll = &ixgbe_clean_txonly;
-                               else if (q_vector->rx.count == 1)
-                                       napi->poll = &ixgbe_clean_rxonly;
-                       }
-               }
-
-               napi_enable(napi);
+               napi_enable(&q_vector->napi);
        }
 }
 
@@ -3597,7 +3330,7 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
 
        /* reconfigure the hardware */
        if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE) {
-#ifdef CONFIG_FCOE
+#ifdef IXGBE_FCOE
                if (adapter->netdev->features & NETIF_F_FCOE_MTU)
                        max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE);
 #endif
@@ -4101,7 +3834,6 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
        struct ixgbe_hw *hw = &adapter->hw;
        u32 rxctrl;
        int i;
-       int num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
 
        /* signal that we are down to the interrupt handler */
        set_bit(__IXGBE_DOWN, &adapter->state);
@@ -4133,26 +3865,19 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
 
        del_timer_sync(&adapter->service_timer);
 
-       /* disable receive for all VFs and wait one second */
        if (adapter->num_vfs) {
-               /* ping all the active vfs to let them know we are going down */
-               ixgbe_ping_all_vfs(adapter);
-
-               /* Disable all VFTE/VFRE TX/RX */
-               ixgbe_disable_tx_rx(adapter);
+               /* Clear EITR Select mapping */
+               IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, 0);
 
                /* Mark all the VFs as inactive */
                for (i = 0 ; i < adapter->num_vfs; i++)
                        adapter->vfinfo[i].clear_to_send = 0;
-       }
 
-       /* Cleanup the affinity_hint CPU mask memory and callback */
-       for (i = 0; i < num_q_vectors; i++) {
-               struct ixgbe_q_vector *q_vector = adapter->q_vector[i];
-               /* clear the affinity_mask in the IRQ descriptor */
-               irq_set_affinity_hint(adapter->msix_entries[i].vector, NULL);
-               /* release the CPU mask memory */
-               free_cpumask_var(q_vector->affinity_mask);
+               /* ping all the active vfs to let them know we are going down */
+               ixgbe_ping_all_vfs(adapter);
+
+               /* Disable all VFTE/VFRE TX/RX */
+               ixgbe_disable_tx_rx(adapter);
        }
 
        /* disable transmits in the hardware now that interrupts are off */
@@ -4204,28 +3929,41 @@ static int ixgbe_poll(struct napi_struct *napi, int budget)
        struct ixgbe_q_vector *q_vector =
                                container_of(napi, struct ixgbe_q_vector, napi);
        struct ixgbe_adapter *adapter = q_vector->adapter;
-       int tx_clean_complete, work_done = 0;
+       struct ixgbe_ring *ring;
+       int per_ring_budget;
+       bool clean_complete = true;
 
 #ifdef CONFIG_IXGBE_DCA
        if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
                ixgbe_update_dca(q_vector);
 #endif
 
-       tx_clean_complete = ixgbe_clean_tx_irq(q_vector, adapter->tx_ring[0]);
-       ixgbe_clean_rx_irq(q_vector, adapter->rx_ring[0], &work_done, budget);
+       for (ring = q_vector->tx.ring; ring != NULL; ring = ring->next)
+               clean_complete &= !!ixgbe_clean_tx_irq(q_vector, ring);
+
+       /* attempt to distribute budget to each queue fairly, but don't allow
+        * the budget to go below 1 or we would stop polling prematurely */
+       if (q_vector->rx.count > 1)
+               per_ring_budget = max(budget/q_vector->rx.count, 1);
+       else
+               per_ring_budget = budget;
+
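+       /* clean each Rx ring, giving each its share of the NAPI budget */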
+       for (ring = q_vector->rx.ring; ring != NULL; ring = ring->next)
+               clean_complete &= ixgbe_clean_rx_irq(q_vector, ring,
+                                                    per_ring_budget);
 
-       if (!tx_clean_complete)
-               work_done = budget;
+       /* If all work not completed, return budget and keep polling */
+       if (!clean_complete)
+               return budget;
 
-       /* If budget not fully consumed, exit the polling mode */
-       if (work_done < budget) {
-               napi_complete(napi);
-               if (adapter->rx_itr_setting & 1)
-                       ixgbe_set_itr(q_vector);
-               if (!test_bit(__IXGBE_DOWN, &adapter->state))
-                       ixgbe_irq_enable_queues(adapter, IXGBE_EIMS_RTX_QUEUE);
-       }
-       return work_done;
+       /* all work done, exit the polling mode */
+       napi_complete(napi);
+       if (adapter->rx_itr_setting & 1)
+               ixgbe_set_itr(q_vector);
+       if (!test_bit(__IXGBE_DOWN, &adapter->state))
+               ixgbe_irq_enable_queues(adapter, ((u64)1 << q_vector->v_idx));
+
+       return 0;
 }
 
 /**
@@ -4537,7 +4275,7 @@ static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc,
                break;
        case ixgbe_mac_82599EB:
        case ixgbe_mac_X540:
-               if (num_tcs == 8) {
+               if (num_tcs > 4) {
                        if (tc < 3) {
                                *tx = tc << 5;
                                *rx = tc << 4;
@@ -4548,7 +4286,7 @@ static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc,
                                *tx = ((tc + 8) << 3);
                                *rx = tc << 4;
                        }
-               } else if (num_tcs == 4) {
+               } else {
                        *rx =  tc << 5;
                        switch (tc) {
                        case 0:
@@ -4866,19 +4604,15 @@ out:
  **/
 static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
 {
-       int q_idx, num_q_vectors;
+       int v_idx, num_q_vectors;
        struct ixgbe_q_vector *q_vector;
-       int (*poll)(struct napi_struct *, int);
 
-       if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
+       if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
                num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
-               poll = &ixgbe_clean_rxtx_many;
-       } else {
+       else
                num_q_vectors = 1;
-               poll = &ixgbe_poll;
-       }
 
-       for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
+       for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
                q_vector = kzalloc_node(sizeof(struct ixgbe_q_vector),
                                        GFP_KERNEL, adapter->node);
                if (!q_vector)
@@ -4886,25 +4620,35 @@ static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
                                           GFP_KERNEL);
                if (!q_vector)
                        goto err_out;
+
                q_vector->adapter = adapter;
+               q_vector->v_idx = v_idx;
+
+               /* Allocate the affinity_hint cpumask, configure the mask */
+               if (!alloc_cpumask_var(&q_vector->affinity_mask, GFP_KERNEL))
+                       goto err_out;
+               cpumask_set_cpu(v_idx, q_vector->affinity_mask);
+
                if (q_vector->tx.count && !q_vector->rx.count)
                        q_vector->eitr = adapter->tx_eitr_param;
                else
                        q_vector->eitr = adapter->rx_eitr_param;
-               q_vector->v_idx = q_idx;
-               netif_napi_add(adapter->netdev, &q_vector->napi, (*poll), 64);
-               adapter->q_vector[q_idx] = q_vector;
+
+               netif_napi_add(adapter->netdev, &q_vector->napi,
+                              ixgbe_poll, 64);
+               adapter->q_vector[v_idx] = q_vector;
        }
 
        return 0;
 
 err_out:
-       while (q_idx) {
-               q_idx--;
-               q_vector = adapter->q_vector[q_idx];
+       while (v_idx) {
+               v_idx--;
+               q_vector = adapter->q_vector[v_idx];
                netif_napi_del(&q_vector->napi);
+               free_cpumask_var(q_vector->affinity_mask);
                kfree(q_vector);
-               adapter->q_vector[q_idx] = NULL;
+               adapter->q_vector[v_idx] = NULL;
        }
        return -ENOMEM;
 }
@@ -4919,17 +4663,18 @@ err_out:
  **/
 static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter)
 {
-       int q_idx, num_q_vectors;
+       int v_idx, num_q_vectors;
 
        if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
                num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
        else
                num_q_vectors = 1;
 
-       for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
-               struct ixgbe_q_vector *q_vector = adapter->q_vector[q_idx];
-               adapter->q_vector[q_idx] = NULL;
+       for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
+               struct ixgbe_q_vector *q_vector = adapter->q_vector[v_idx];
+               adapter->q_vector[v_idx] = NULL;
                netif_napi_del(&q_vector->napi);
+               free_cpumask_var(q_vector->affinity_mask);
                kfree(q_vector);
        }
 }
@@ -5147,7 +4892,7 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
        adapter->rx_ring_count = IXGBE_DEFAULT_RXD;
 
        /* set default work limits */
-       adapter->tx_work_limit = adapter->tx_ring_count;
+       adapter->tx_work_limit = IXGBE_DEFAULT_TX_WORK;
 
        /* initialize eeprom parameters */
        if (ixgbe_init_eeprom_params_generic(hw)) {
@@ -5903,7 +5648,7 @@ static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter)
                /* get one bit for every active tx/rx interrupt vector */
                for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
                        struct ixgbe_q_vector *qv = adapter->q_vector[i];
-                       if (qv->rx.count || qv->tx.count)
+                       if (qv->rx.ring || qv->tx.ring)
                                eics |= ((u64)1 << i);
                }
        }
@@ -6351,7 +6096,8 @@ static bool ixgbe_tx_csum(struct ixgbe_ring *tx_ring,
        u32 type_tucmd = 0;
 
        if (skb->ip_summed != CHECKSUM_PARTIAL) {
-           if (!(tx_flags & IXGBE_TX_FLAGS_VLAN))
+           if (!(tx_flags & IXGBE_TX_FLAGS_HW_VLAN) &&
+               !(tx_flags & IXGBE_TX_FLAGS_TXSW))
                        return false;
        } else {
                u8 l4_hdr = 0;
@@ -6408,185 +6154,185 @@ static bool ixgbe_tx_csum(struct ixgbe_ring *tx_ring,
        return (skb->ip_summed == CHECKSUM_PARTIAL);
 }
 
-static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
-                       struct ixgbe_ring *tx_ring,
-                       struct sk_buff *skb, u32 tx_flags,
-                       unsigned int first, const u8 hdr_len)
+static __le32 ixgbe_tx_cmd_type(u32 tx_flags)
 {
-       struct device *dev = tx_ring->dev;
-       struct ixgbe_tx_buffer *tx_buffer_info;
-       unsigned int len;
-       unsigned int total = skb->len;
-       unsigned int offset = 0, size, count = 0;
-       unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
-       unsigned int f;
-       unsigned int bytecount = skb->len;
-       u16 gso_segs = 1;
-       u16 i;
+       /* set type for advanced descriptor with frame checksum insertion */
+       __le32 cmd_type = cpu_to_le32(IXGBE_ADVTXD_DTYP_DATA |
+                                     IXGBE_ADVTXD_DCMD_IFCS |
+                                     IXGBE_ADVTXD_DCMD_DEXT);
 
-       i = tx_ring->next_to_use;
+       /* set HW vlan bit if vlan is present */
+       if (tx_flags & IXGBE_TX_FLAGS_HW_VLAN)
+               cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_VLE);
 
-       if (tx_flags & IXGBE_TX_FLAGS_FCOE)
-               /* excluding fcoe_crc_eof for FCoE */
-               total -= sizeof(struct fcoe_crc_eof);
+       /* set segmentation enable bits for TSO/FSO */
+#ifdef IXGBE_FCOE
+       if ((tx_flags & IXGBE_TX_FLAGS_TSO) || (tx_flags & IXGBE_TX_FLAGS_FSO))
+#else
+       if (tx_flags & IXGBE_TX_FLAGS_TSO)
+#endif
+               cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_TSE);
 
-       len = min(skb_headlen(skb), total);
-       while (len) {
-               tx_buffer_info = &tx_ring->tx_buffer_info[i];
-               size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);
-
-               tx_buffer_info->length = size;
-               tx_buffer_info->mapped_as_page = false;
-               tx_buffer_info->dma = dma_map_single(dev,
-                                                    skb->data + offset,
-                                                    size, DMA_TO_DEVICE);
-               if (dma_mapping_error(dev, tx_buffer_info->dma))
-                       goto dma_error;
-               tx_buffer_info->time_stamp = jiffies;
-               tx_buffer_info->next_to_watch = i;
+       return cmd_type;
+}
 
-               len -= size;
-               total -= size;
-               offset += size;
-               count++;
+static __le32 ixgbe_tx_olinfo_status(u32 tx_flags, unsigned int paylen)
+{
+       __le32 olinfo_status =
+               cpu_to_le32(paylen << IXGBE_ADVTXD_PAYLEN_SHIFT);
 
-               if (len) {
-                       i++;
-                       if (i == tx_ring->count)
-                               i = 0;
-               }
+       if (tx_flags & IXGBE_TX_FLAGS_TSO) {
+               olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_TXSM |
+                                           (1 << IXGBE_ADVTXD_IDX_SHIFT));
+               /* enable IPv4 checksum for TSO */
+               if (tx_flags & IXGBE_TX_FLAGS_IPV4)
+                       olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_IXSM);
        }
 
-       for (f = 0; f < nr_frags; f++) {
-               struct skb_frag_struct *frag;
-
-               frag = &skb_shinfo(skb)->frags[f];
-               len = min((unsigned int)frag->size, total);
-               offset = frag->page_offset;
-
-               while (len) {
-                       i++;
-                       if (i == tx_ring->count)
-                               i = 0;
-
-                       tx_buffer_info = &tx_ring->tx_buffer_info[i];
-                       size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);
-
-                       tx_buffer_info->length = size;
-                       tx_buffer_info->dma = dma_map_page(dev,
-                                                          frag->page,
-                                                          offset, size,
-                                                          DMA_TO_DEVICE);
-                       tx_buffer_info->mapped_as_page = true;
-                       if (dma_mapping_error(dev, tx_buffer_info->dma))
-                               goto dma_error;
-                       tx_buffer_info->time_stamp = jiffies;
-                       tx_buffer_info->next_to_watch = i;
-
-                       len -= size;
-                       total -= size;
-                       offset += size;
-                       count++;
-               }
-               if (total == 0)
-                       break;
-       }
+       /* enable L4 checksum for TSO and TX checksum offload */
+       if (tx_flags & IXGBE_TX_FLAGS_CSUM)
+               olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_TXSM);
 
-       if (tx_flags & IXGBE_TX_FLAGS_TSO)
-               gso_segs = skb_shinfo(skb)->gso_segs;
 #ifdef IXGBE_FCOE
-       /* adjust for FCoE Sequence Offload */
-       else if (tx_flags & IXGBE_TX_FLAGS_FSO)
-               gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
-                                       skb_shinfo(skb)->gso_size);
-#endif /* IXGBE_FCOE */
-       bytecount += (gso_segs - 1) * hdr_len;
+       /* use index 1 context for FCOE/FSO */
+       if (tx_flags & IXGBE_TX_FLAGS_FCOE)
+               olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_CC |
+                                           (1 << IXGBE_ADVTXD_IDX_SHIFT));
 
-       /* multiply data chunks by size of headers */
-       tx_ring->tx_buffer_info[i].bytecount = bytecount;
-       tx_ring->tx_buffer_info[i].gso_segs = gso_segs;
-       tx_ring->tx_buffer_info[i].skb = skb;
-       tx_ring->tx_buffer_info[first].next_to_watch = i;
+#endif
+       /*
+        * The Check Context bit must be set if Tx switching is enabled,
+        * which it always is when virtual functions are running.
+        */
+       if (tx_flags & IXGBE_TX_FLAGS_TXSW)
+               olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_CC);
 
-       return count;
+       return olinfo_status;
+}
 
-dma_error:
-       e_dev_err("TX DMA map failed\n");
+#define IXGBE_TXD_CMD (IXGBE_TXD_CMD_EOP | \
+                      IXGBE_TXD_CMD_RS)
 
-       /* clear timestamp and dma mappings for failed tx_buffer_info map */
-       tx_buffer_info->dma = 0;
-       tx_buffer_info->time_stamp = 0;
-       tx_buffer_info->next_to_watch = 0;
-       if (count)
-               count--;
+static void ixgbe_tx_map(struct ixgbe_ring *tx_ring,
+                        struct sk_buff *skb,
+                        struct ixgbe_tx_buffer *first,
+                        u32 tx_flags,
+                        const u8 hdr_len)
+{
+       struct device *dev = tx_ring->dev;
+       struct ixgbe_tx_buffer *tx_buffer_info;
+       union ixgbe_adv_tx_desc *tx_desc;
+       dma_addr_t dma;
+       __le32 cmd_type, olinfo_status;
+       struct skb_frag_struct *frag;
+       unsigned int f = 0;
+       unsigned int data_len = skb->data_len;
+       unsigned int size = skb_headlen(skb);
+       u32 offset = 0;
+       u32 paylen = skb->len - hdr_len;
+       u16 i = tx_ring->next_to_use;
+       u16 gso_segs;
 
-       /* clear timestamp and dma mappings for remaining portion of packet */
-       while (count--) {
-               if (i == 0)
-                       i += tx_ring->count;
-               i--;
-               tx_buffer_info = &tx_ring->tx_buffer_info[i];
-               ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
+#ifdef IXGBE_FCOE
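+       /* do not map the fcoe_crc_eof trailer at the end of the frame */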
+       if (tx_flags & IXGBE_TX_FLAGS_FCOE) {
+               if (data_len >= sizeof(struct fcoe_crc_eof)) {
+                       data_len -= sizeof(struct fcoe_crc_eof);
+               } else {
+                       size -= sizeof(struct fcoe_crc_eof) - data_len;
+                       data_len = 0;
+               }
        }
 
-       return 0;
-}
+#endif
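+       /* map the linear portion of the skb */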
+       dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
+       if (dma_mapping_error(dev, dma))
+               goto dma_error;
 
-static void ixgbe_tx_queue(struct ixgbe_ring *tx_ring,
-                          int tx_flags, int count, u32 paylen, u8 hdr_len)
-{
-       union ixgbe_adv_tx_desc *tx_desc = NULL;
-       struct ixgbe_tx_buffer *tx_buffer_info;
-       u32 olinfo_status = 0, cmd_type_len = 0;
-       unsigned int i;
-       u32 txd_cmd = IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS | IXGBE_TXD_CMD_IFCS;
+       cmd_type = ixgbe_tx_cmd_type(tx_flags);
+       olinfo_status = ixgbe_tx_olinfo_status(tx_flags, paylen);
 
-       cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA;
+       tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i);
 
-       cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;
+       for (;;) {
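+               /* split oversized buffers across multiple descriptors */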
+               while (size > IXGBE_MAX_DATA_PER_TXD) {
+                       tx_desc->read.buffer_addr = cpu_to_le64(dma + offset);
+                       tx_desc->read.cmd_type_len =
+                               cmd_type | cpu_to_le32(IXGBE_MAX_DATA_PER_TXD);
+                       tx_desc->read.olinfo_status = olinfo_status;
 
-       if (tx_flags & IXGBE_TX_FLAGS_VLAN)
-               cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
+                       offset += IXGBE_MAX_DATA_PER_TXD;
+                       size -= IXGBE_MAX_DATA_PER_TXD;
 
-       if (tx_flags & IXGBE_TX_FLAGS_TSO) {
-               cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
+                       tx_desc++;
+                       i++;
+                       if (i == tx_ring->count) {
+                               tx_desc = IXGBE_TX_DESC_ADV(tx_ring, 0);
+                               i = 0;
+                       }
+               }
 
-               olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
-                                IXGBE_ADVTXD_POPTS_SHIFT;
+               tx_buffer_info = &tx_ring->tx_buffer_info[i];
+               tx_buffer_info->length = offset + size;
+               tx_buffer_info->tx_flags = tx_flags;
+               tx_buffer_info->dma = dma;
 
-               /* use index 1 context for tso */
-               olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
-               if (tx_flags & IXGBE_TX_FLAGS_IPV4)
-                       olinfo_status |= IXGBE_TXD_POPTS_IXSM <<
-                                        IXGBE_ADVTXD_POPTS_SHIFT;
+               tx_desc->read.buffer_addr = cpu_to_le64(dma + offset);
+               tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(size);
+               tx_desc->read.olinfo_status = olinfo_status;
 
-       } else if (tx_flags & IXGBE_TX_FLAGS_CSUM)
-               olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
-                                IXGBE_ADVTXD_POPTS_SHIFT;
+               if (!data_len)
+                       break;
 
-       if (tx_flags & IXGBE_TX_FLAGS_FCOE) {
-               olinfo_status |= IXGBE_ADVTXD_CC;
-               olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
-               if (tx_flags & IXGBE_TX_FLAGS_FSO)
-                       cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
-       }
+               frag = &skb_shinfo(skb)->frags[f];
+#ifdef IXGBE_FCOE
+               size = min_t(unsigned int, data_len, frag->size);
+#else
+               size = frag->size;
+#endif
+               data_len -= size;
+               f++;
 
-       olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT);
+               offset = 0;
+               tx_flags |= IXGBE_TX_FLAGS_MAPPED_AS_PAGE;
 
-       i = tx_ring->next_to_use;
-       while (count--) {
-               tx_buffer_info = &tx_ring->tx_buffer_info[i];
-               tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i);
-               tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma);
-               tx_desc->read.cmd_type_len =
-                       cpu_to_le32(cmd_type_len | tx_buffer_info->length);
-               tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
+               dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
+               if (dma_mapping_error(dev, dma))
+                       goto dma_error;
+
+               tx_desc++;
                i++;
-               if (i == tx_ring->count)
+               if (i == tx_ring->count) {
+                       tx_desc = IXGBE_TX_DESC_ADV(tx_ring, 0);
                        i = 0;
+               }
        }
 
-       tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd);
+       tx_desc->read.cmd_type_len |= cpu_to_le32(IXGBE_TXD_CMD);
+
+       i++;
+       if (i == tx_ring->count)
+               i = 0;
+
+       tx_ring->next_to_use = i;
+
+       if (tx_flags & IXGBE_TX_FLAGS_TSO)
+               gso_segs = skb_shinfo(skb)->gso_segs;
+#ifdef IXGBE_FCOE
+       /* adjust for FCoE Sequence Offload */
+       else if (tx_flags & IXGBE_TX_FLAGS_FSO)
+               gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
+                                       skb_shinfo(skb)->gso_size);
+#endif /* IXGBE_FCOE */
+       else
+               gso_segs = 1;
+
+       /* bytecount covers the payload plus one header per segment */
+       tx_buffer_info->bytecount = paylen + (gso_segs * hdr_len);
+       tx_buffer_info->gso_segs = gso_segs;
+       tx_buffer_info->skb = skb;
+
+       /* set the timestamp */
+       first->time_stamp = jiffies;
 
        /*
         * Force memory writes to complete before letting h/w
@@ -6596,8 +6342,30 @@ static void ixgbe_tx_queue(struct ixgbe_ring *tx_ring,
         */
        wmb();
 
-       tx_ring->next_to_use = i;
+       /* set next_to_watch value indicating a packet is present */
+       first->next_to_watch = tx_desc;
+
+       /* notify HW of packet */
        writel(i, tx_ring->tail);
+
+       return;
+dma_error:
+       dev_err(dev, "TX DMA map failed\n");
+
+       /* clear dma mappings for failed tx_buffer_info map */
+       for (;;) {
+               tx_buffer_info = &tx_ring->tx_buffer_info[i];
+               ixgbe_unmap_tx_resource(tx_ring, tx_buffer_info);
+               if (tx_buffer_info == first)
+                       break;
+               if (i == 0)
+                       i = tx_ring->count;
+               i--;
+       }
+
+       dev_kfree_skb_any(skb);
+
+       tx_ring->next_to_use = i;
 }
 
 static void ixgbe_atr(struct ixgbe_ring *ring, struct sk_buff *skb,
@@ -6636,8 +6404,8 @@ static void ixgbe_atr(struct ixgbe_ring *ring, struct sk_buff *skb,
 
        th = tcp_hdr(skb);
 
-       /* skip this packet since the socket is closing */
-       if (th->fin)
+       /* skip this packet since it is invalid or the socket is closing */
+       if (!th || th->fin)
                return;
 
        /* sample on all syn packets or once every atr sample count */
@@ -6662,7 +6430,7 @@ static void ixgbe_atr(struct ixgbe_ring *ring, struct sk_buff *skb,
         * since src port and flex bytes occupy the same word XOR them together
         * and write the value to source port portion of compressed dword
         */
-       if (vlan_id)
+       if (tx_flags & (IXGBE_TX_FLAGS_SW_VLAN | IXGBE_TX_FLAGS_HW_VLAN))
                common.port.src ^= th->dest ^ __constant_htons(ETH_P_8021Q);
        else
                common.port.src ^= th->dest ^ protocol;
@@ -6744,14 +6512,14 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
                          struct ixgbe_adapter *adapter,
                          struct ixgbe_ring *tx_ring)
 {
+       struct ixgbe_tx_buffer *first;
        int tso;
-       u32  tx_flags = 0;
+       u32 tx_flags = 0;
 #if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
        unsigned short f;
 #endif
-       u16 first;
        u16 count = TXD_USE_COUNT(skb_headlen(skb));
-       __be16 protocol;
+       __be16 protocol = skb->protocol;
        u8 hdr_len = 0;
 
        /*
@@ -6772,68 +6540,88 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
                return NETDEV_TX_BUSY;
        }
 
-       protocol = vlan_get_protocol(skb);
+#ifdef CONFIG_PCI_IOV
+       if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
+               tx_flags |= IXGBE_TX_FLAGS_TXSW;
 
+#endif
+       /* if a HW VLAN tag is being added, default to using the HW tag */
        if (vlan_tx_tag_present(skb)) {
-               tx_flags |= vlan_tx_tag_get(skb);
-               if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
-                       tx_flags &= ~IXGBE_TX_FLAGS_VLAN_PRIO_MASK;
-                       tx_flags |= tx_ring->dcb_tc << 13;
+               tx_flags |= vlan_tx_tag_get(skb) << IXGBE_TX_FLAGS_VLAN_SHIFT;
+               tx_flags |= IXGBE_TX_FLAGS_HW_VLAN;
+       /* else if it is a SW VLAN, check the next protocol and store the tag */
+       } else if (protocol == __constant_htons(ETH_P_8021Q)) {
+               struct vlan_hdr *vhdr, _vhdr;
+               vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
+               if (!vhdr)
+                       goto out_drop;
+
+               protocol = vhdr->h_vlan_encapsulated_proto;
+               tx_flags |= ntohs(vhdr->h_vlan_TCI) << IXGBE_TX_FLAGS_VLAN_SHIFT;
+               tx_flags |= IXGBE_TX_FLAGS_SW_VLAN;
+       }
+
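+       /* with DCB, place the ring's traffic class in the VLAN priority bits */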
+       if ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) &&
+           ((tx_flags & (IXGBE_TX_FLAGS_HW_VLAN | IXGBE_TX_FLAGS_SW_VLAN)) ||
+            (skb->priority != TC_PRIO_CONTROL))) {
+               tx_flags &= ~IXGBE_TX_FLAGS_VLAN_PRIO_MASK;
+               tx_flags |= tx_ring->dcb_tc <<
+                           IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT;
+               if (tx_flags & IXGBE_TX_FLAGS_SW_VLAN) {
+                       struct vlan_ethhdr *vhdr;
+                       if (skb_header_cloned(skb) &&
+                           pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
+                               goto out_drop;
+                       vhdr = (struct vlan_ethhdr *)skb->data;
+                       vhdr->h_vlan_TCI = htons(tx_flags >>
+                                                IXGBE_TX_FLAGS_VLAN_SHIFT);
+               } else {
+                       tx_flags |= IXGBE_TX_FLAGS_HW_VLAN;
                }
-               tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
-               tx_flags |= IXGBE_TX_FLAGS_VLAN;
-       } else if (adapter->flags & IXGBE_FLAG_DCB_ENABLED &&
-                  skb->priority != TC_PRIO_CONTROL) {
-               tx_flags |= tx_ring->dcb_tc << 13;
-               tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
-               tx_flags |= IXGBE_TX_FLAGS_VLAN;
        }
 
-#ifdef IXGBE_FCOE
-       /* for FCoE with DCB, we force the priority to what
-        * was specified by the switch */
-       if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED &&
-           (protocol == htons(ETH_P_FCOE)))
-               tx_flags |= IXGBE_TX_FLAGS_FCOE;
-
-#endif
        /* record the location of the first descriptor for this packet */
-       first = tx_ring->next_to_use;
+       first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
 
-       if (tx_flags & IXGBE_TX_FLAGS_FCOE) {
 #ifdef IXGBE_FCOE
-               /* setup tx offload for FCoE */
+       /* setup tx offload for FCoE */
+       if ((protocol == __constant_htons(ETH_P_FCOE)) &&
+           (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) {
                tso = ixgbe_fso(tx_ring, skb, tx_flags, &hdr_len);
                if (tso < 0)
                        goto out_drop;
                else if (tso)
-                       tx_flags |= IXGBE_TX_FLAGS_FSO;
-#endif /* IXGBE_FCOE */
-       } else {
-               if (protocol == htons(ETH_P_IP))
-                       tx_flags |= IXGBE_TX_FLAGS_IPV4;
-               tso = ixgbe_tso(tx_ring, skb, tx_flags, protocol, &hdr_len);
-               if (tso < 0)
-                       goto out_drop;
-               else if (tso)
-                       tx_flags |= IXGBE_TX_FLAGS_TSO;
-               else if (ixgbe_tx_csum(tx_ring, skb, tx_flags, protocol))
-                       tx_flags |= IXGBE_TX_FLAGS_CSUM;
+                       tx_flags |= IXGBE_TX_FLAGS_FSO |
+                                   IXGBE_TX_FLAGS_FCOE;
+               else
+                       tx_flags |= IXGBE_TX_FLAGS_FCOE;
+
+               goto xmit_fcoe;
        }
 
-       count = ixgbe_tx_map(adapter, tx_ring, skb, tx_flags, first, hdr_len);
-       if (count) {
-               /* add the ATR filter if ATR is on */
-               if (test_bit(__IXGBE_TX_FDIR_INIT_DONE, &tx_ring->state))
-                       ixgbe_atr(tx_ring, skb, tx_flags, protocol);
-               ixgbe_tx_queue(tx_ring, tx_flags, count, skb->len, hdr_len);
-               ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED);
+#endif /* IXGBE_FCOE */
+       /* setup IPv4/IPv6 offloads */
+       if (protocol == __constant_htons(ETH_P_IP))
+               tx_flags |= IXGBE_TX_FLAGS_IPV4;
 
-       } else {
-               tx_ring->tx_buffer_info[first].time_stamp = 0;
-               tx_ring->next_to_use = first;
+       tso = ixgbe_tso(tx_ring, skb, tx_flags, protocol, &hdr_len);
+       if (tso < 0)
                goto out_drop;
-       }
+       else if (tso)
+               tx_flags |= IXGBE_TX_FLAGS_TSO;
+       else if (ixgbe_tx_csum(tx_ring, skb, tx_flags, protocol))
+               tx_flags |= IXGBE_TX_FLAGS_CSUM;
+
+       /* add the ATR filter if ATR is on */
+       if (test_bit(__IXGBE_TX_FDIR_INIT_DONE, &tx_ring->state))
+               ixgbe_atr(tx_ring, skb, tx_flags, protocol);
+
+#ifdef IXGBE_FCOE
+xmit_fcoe:
+#endif /* IXGBE_FCOE */
+       ixgbe_tx_map(tx_ring, skb, first, tx_flags, hdr_len);
+
+       ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED);
 
        return NETDEV_TX_OK;
 
@@ -6972,7 +6760,7 @@ static void ixgbe_netpoll(struct net_device *netdev)
                int num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
                for (i = 0; i < num_q_vectors; i++) {
                        struct ixgbe_q_vector *q_vector = adapter->q_vector[i];
-                       ixgbe_msix_clean_many(0, q_vector);
+                       ixgbe_msix_clean_rings(0, q_vector);
                }
        } else {
                ixgbe_intr(adapter->pdev->irq, netdev);
@@ -7077,11 +6865,11 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
        struct ixgbe_adapter *adapter = netdev_priv(dev);
        struct ixgbe_hw *hw = &adapter->hw;
 
-       /* If DCB is enabled do not remove traffic classes, multiple
-        * traffic classes are required to implement DCB
-        */
-       if (!tc && (adapter->flags & IXGBE_FLAG_DCB_ENABLED))
-               return 0;
+       /* Multiple traffic classes require multiple queues */
+       if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
+               e_err(drv, "Enable failed, needs MSI-X\n");
+               return -EINVAL;
+       }
 
        /* Hardware supports up to 8 traffic classes */
        if (tc > MAX_TRAFFIC_CLASS ||
@@ -7096,11 +6884,27 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
                ixgbe_close(dev);
        ixgbe_clear_interrupt_scheme(adapter);
 
-       if (tc)
+       if (tc) {
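+               /* enabling DCB: save the flow control mode and disable Flow Director */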
                netdev_set_num_tc(dev, tc);
-       else
+               adapter->last_lfc_mode = adapter->hw.fc.current_mode;
+
+               adapter->flags |= IXGBE_FLAG_DCB_ENABLED;
+               adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
+
+               if (adapter->hw.mac.type == ixgbe_mac_82598EB)
+                       adapter->hw.fc.requested_mode = ixgbe_fc_none;
+       } else {
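+               /* disabling DCB: restore flow control and re-enable Flow Director */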
                netdev_reset_tc(dev);
 
+               adapter->hw.fc.requested_mode = adapter->last_lfc_mode;
+
+               adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
+               adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
+
+               adapter->temp_dcb_cfg.pfc_mode_enable = false;
+               adapter->dcb_cfg.pfc_mode_enable = false;
+       }
+
        ixgbe_init_interrupt_scheme(adapter);
        ixgbe_validate_rtr(adapter, tc);
        if (netif_running(dev))
@@ -7506,7 +7310,6 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
                           NETIF_F_HW_VLAN_FILTER |
                           NETIF_F_TSO |
                           NETIF_F_TSO6 |
-                          NETIF_F_GRO |
                           NETIF_F_RXHASH |
                           NETIF_F_RXCSUM;