Merge branch 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git...
[pandora-kernel.git] / drivers / net / bnx2x / bnx2x_cmn.c
index 5b8c0b6..459614d 100644
  *
  */
 
-
 #include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
 #include <linux/ip.h>
 #include <net/ipv6.h>
 #include <net/ip6_checksum.h>
 #include <linux/firmware.h>
 #include "bnx2x_cmn.h"
 
-#ifdef BCM_VLAN
-#include <linux/if_vlan.h>
-#endif
-
 #include "bnx2x_init.h"
 
-static int bnx2x_poll(struct napi_struct *napi, int budget);
+static int bnx2x_setup_irqs(struct bnx2x *bp);
 
 /* free skb in the packet ring at pos idx
  * return idx of last bd freed
@@ -54,7 +50,7 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
        DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
        tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
        dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
-                        BD_UNMAP_LEN(tx_start_bd), PCI_DMA_TODEVICE);
+                        BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);
 
        nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
 #ifdef BNX2X_STOP_ON_ERROR
@@ -137,7 +133,6 @@ int bnx2x_tx_int(struct bnx2x_fastpath *fp)
         */
        smp_mb();
 
-       /* TBD need a thresh? */
        if (unlikely(netif_tx_queue_stopped(txq))) {
               /* Taking tx_lock() is needed to prevent re-enabling the queue
                * while it's empty. This could have happened if rx_action() gets
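
The comment that ends this hunk guards the classic stop/wake race: the
completion path must re-test the queue state under the tx lock, because a
concurrent xmit may stop the queue right after the first unlocked check.
A minimal sketch of the idiom, with a hypothetical ring and helpers rather
than the driver's own code:

    #include <linux/netdevice.h>

    /* Hypothetical completion-side wake, illustrating the pattern only. */
    static void tx_complete_sketch(struct my_ring *ring,
                                   struct netdev_queue *txq)
    {
            ring->cons += reclaim_done_bds(ring);   /* free completed BDs */

            /* Publish the new consumer index before testing the queue
             * state; pairs with a barrier on the xmit side. */
            smp_mb();

            if (unlikely(netif_tx_queue_stopped(txq))) {
                    __netif_tx_lock(txq, smp_processor_id());
                    /* Re-check under the lock: xmit may have stopped the
                     * queue again after the first test. */
                    if (netif_tx_queue_stopped(txq) && ring_has_room(ring))
                            netif_tx_wake_queue(txq);
                    __netif_tx_unlock(txq);
            }
    }
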
@@ -349,16 +344,9 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
        if (likely(new_skb)) {
                /* fix ip xsum and give it to the stack */
                /* (no need to map the new skb) */
-#ifdef BCM_VLAN
-               int is_vlan_cqe =
-                       (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
-                        PARSING_FLAGS_VLAN);
-               int is_not_hwaccel_vlan_cqe =
-                       (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
-#endif
 
                prefetch(skb);
-               prefetch(((char *)(skb)) + 128);
+               prefetch(((char *)(skb)) + L1_CACHE_BYTES);
 
 #ifdef BNX2X_STOP_ON_ERROR
                if (pad + len > bp->rx_buf_size) {
@@ -380,28 +368,18 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                        struct iphdr *iph;
 
                        iph = (struct iphdr *)skb->data;
-#ifdef BCM_VLAN
-                       /* If there is no Rx VLAN offloading -
-                          take VLAN tag into an account */
-                       if (unlikely(is_not_hwaccel_vlan_cqe))
-                               iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
-#endif
                        iph->check = 0;
                        iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
                }
 
                if (!bnx2x_fill_frag_skb(bp, fp, skb,
                                         &cqe->fast_path_cqe, cqe_idx)) {
-#ifdef BCM_VLAN
-                       if ((bp->vlgrp != NULL) &&
-                               (le16_to_cpu(cqe->fast_path_cqe.
-                               pars_flags.flags) & PARSING_FLAGS_VLAN))
-                               vlan_gro_receive(&fp->napi, bp->vlgrp,
+                       if ((le16_to_cpu(cqe->fast_path_cqe.
+                           pars_flags.flags) & PARSING_FLAGS_VLAN))
+                               __vlan_hwaccel_put_tag(skb,
                                                 le16_to_cpu(cqe->fast_path_cqe.
-                                                            vlan_tag), skb);
-                       else
-#endif
-                               napi_gro_receive(&fp->napi, skb);
+                                                            vlan_tag));
+                       napi_gro_receive(&fp->napi, skb);
                } else {
                        DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
                           " - dropping packet!\n");
@@ -510,8 +488,11 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
                        len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
                        pad = cqe->fast_path_cqe.placement_offset;
 
-                       /* If CQE is marked both TPA_START and TPA_END
-                          it is a non-TPA CQE */
+                       /* - If CQE is marked both TPA_START and TPA_END it is
+                        *   a non-TPA CQE.
+                        * - An FP CQE will always have the TPA_START and/or
+                        *   TPA_STOP flag set.
+                        */
                        if ((!fp->disable_tpa) &&
                            (TPA_TYPE(cqe_fp_flags) !=
                                        (TPA_TYPE_START | TPA_TYPE_END))) {
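
Per the updated comment, the two TPA bits of a fastpath CQE encode three
valid states. A small helper expressing the test above (helper name
hypothetical, macros as used in the hunk):

    /* TPA_TYPE(flags) masks out the two TPA bits:
     *   START|END -> single-frame (non-TPA) completion
     *   START     -> first CQE of an aggregation
     *   END       -> last CQE of an aggregation ("TPA_STOP")
     * A fastpath CQE with neither bit set should not occur. */
    static bool cqe_is_non_tpa(u8 cqe_fp_flags)
    {
            return TPA_TYPE(cqe_fp_flags) ==
                   (TPA_TYPE_START | TPA_TYPE_END);
    }
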
@@ -529,9 +510,7 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
                                        bnx2x_set_skb_rxhash(bp, cqe, skb);
 
                                        goto next_rx;
-                               }
-
-                               if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
+                               } else { /* TPA_STOP */
                                        DP(NETIF_MSG_RX_STATUS,
                                           "calling tpa_stop on queue %d\n",
                                           queue);
@@ -561,7 +540,7 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
                                        dma_unmap_addr(rx_buf, mapping),
                                                   pad + RX_COPY_THRESH,
                                                   DMA_FROM_DEVICE);
-                       prefetch(((char *)(skb)) + 128);
+                       prefetch(((char *)(skb)) + L1_CACHE_BYTES);
 
                        /* is this an error packet? */
                        if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
@@ -624,6 +603,7 @@ reuse_rx:
                        bnx2x_set_skb_rxhash(bp, cqe, skb);
 
                        skb_checksum_none_assert(skb);
+
                        if (bp->rx_csum) {
                                if (likely(BNX2X_RX_CSUM_OK(cqe)))
                                        skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -634,15 +614,11 @@ reuse_rx:
 
                skb_record_rx_queue(skb, fp->index);
 
-#ifdef BCM_VLAN
-               if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
-                   (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
-                    PARSING_FLAGS_VLAN))
-                       vlan_gro_receive(&fp->napi, bp->vlgrp,
-                               le16_to_cpu(cqe->fast_path_cqe.vlan_tag), skb);
-               else
-#endif
-                       napi_gro_receive(&fp->napi, skb);
+               if (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
+                    PARSING_FLAGS_VLAN)
+                       __vlan_hwaccel_put_tag(skb,
+                               le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
+               napi_gro_receive(&fp->napi, skb);
 
 
 next_rx:
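
The checksum lines above follow the standard receive-offload pattern: assert
CHECKSUM_NONE first, then upgrade to CHECKSUM_UNNECESSARY only when checksum
offload is enabled and the hardware verified the packet. A minimal sketch,
where the two flags stand in for bp->rx_csum and BNX2X_RX_CSUM_OK(cqe):

    #include <linux/skbuff.h>

    static void rx_set_csum_sketch(struct sk_buff *skb, bool csum_enabled,
                                   bool hw_csum_ok)
    {
            skb_checksum_none_assert(skb);  /* expect CHECKSUM_NONE */

            if (csum_enabled && hw_csum_ok)
                    skb->ip_summed = CHECKSUM_UNNECESSARY;
            /* otherwise leave CHECKSUM_NONE; the stack verifies it */
    }
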
@@ -705,7 +681,6 @@ static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
        return IRQ_HANDLED;
 }
 
-
 /* HW Lock for shared dual port PHYs */
 void bnx2x_acquire_phy_lock(struct bnx2x *bp)
 {
@@ -833,7 +808,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
        int i, j;
 
        bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN +
-               BNX2X_FW_IP_HDR_ALIGN_PAD;
+               IP_HEADER_ALIGNMENT_PADDING;
 
        DP(NETIF_MSG_IFUP,
           "mtu %d  rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
@@ -917,6 +892,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
                }
        }
 }
+
 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
 {
        int i;
@@ -989,55 +965,49 @@ static void bnx2x_free_msix_irqs(struct bnx2x *bp)
        }
 }
 
-void bnx2x_free_irq(struct bnx2x *bp, bool disable_only)
+void bnx2x_free_irq(struct bnx2x *bp)
 {
-       if (bp->flags & USING_MSIX_FLAG) {
-               if (!disable_only)
-                       bnx2x_free_msix_irqs(bp);
-               pci_disable_msix(bp->pdev);
-               bp->flags &= ~USING_MSIX_FLAG;
-
-       } else if (bp->flags & USING_MSI_FLAG) {
-               if (!disable_only)
-                       free_irq(bp->pdev->irq, bp->dev);
-               pci_disable_msi(bp->pdev);
-               bp->flags &= ~USING_MSI_FLAG;
-
-       } else if (!disable_only)
+       if (bp->flags & USING_MSIX_FLAG)
+               bnx2x_free_msix_irqs(bp);
+       else
+               free_irq(bp->pdev->irq, bp->dev);
 }
 
-static int bnx2x_enable_msix(struct bnx2x *bp)
+int bnx2x_enable_msix(struct bnx2x *bp)
 {
-       int i, rc, offset = 1;
-       int igu_vec = 0;
+       int msix_vec = 0, i, rc, req_cnt;
 
-       bp->msix_table[0].entry = igu_vec;
-       DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
+       bp->msix_table[msix_vec].entry = msix_vec;
+       DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n",
+          bp->msix_table[0].entry);
+       msix_vec++;
 
 #ifdef BCM_CNIC
-       igu_vec = BP_L_ID(bp) + offset;
-       bp->msix_table[1].entry = igu_vec;
-       DP(NETIF_MSG_IFUP, "msix_table[1].entry = %d (CNIC)\n", igu_vec);
-       offset++;
+       bp->msix_table[msix_vec].entry = msix_vec;
+       DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d (CNIC)\n",
+          bp->msix_table[msix_vec].entry, bp->msix_table[msix_vec].entry);
+       msix_vec++;
 #endif
        for_each_queue(bp, i) {
-               igu_vec = BP_L_ID(bp) + offset + i;
-               bp->msix_table[i + offset].entry = igu_vec;
+               bp->msix_table[msix_vec].entry = msix_vec;
                DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
-                  "(fastpath #%u)\n", i + offset, igu_vec, i);
+                  "(fastpath #%u)\n", msix_vec, msix_vec, i);
+               msix_vec++;
        }
 
-       rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
-                            BNX2X_NUM_QUEUES(bp) + offset);
+       req_cnt = BNX2X_NUM_QUEUES(bp) + CNIC_CONTEXT_USE + 1;
+
+       rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], req_cnt);
 
        /*
         * reconfigure number of tx/rx queues according to available
         * MSI-X vectors
         */
        if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
-               /* vectors available for FP */
-               int fp_vec = rc - BNX2X_MSIX_VEC_FP_START;
+               /* how many vectors will we be short? */
+               int diff = req_cnt - rc;
 
                DP(NETIF_MSG_IFUP,
                   "Trying to use less MSI-X vectors: %d\n", rc);
@@ -1049,12 +1019,17 @@ static int bnx2x_enable_msix(struct bnx2x *bp)
                           "MSI-X is not attainable  rc %d\n", rc);
                        return rc;
                }
-
-               bp->num_queues = min(bp->num_queues, fp_vec);
+               /*
+                * decrease number of queues by number of unallocated entries
+                */
+               bp->num_queues -= diff;
 
                DP(NETIF_MSG_IFUP, "New queue configuration set: %d\n",
                                  bp->num_queues);
        } else if (rc) {
+               /* fall back to INTx if there is not enough memory */
+               if (rc == -ENOMEM)
+                       bp->flags |= DISABLE_MSI_FLAG;
                DP(NETIF_MSG_IFUP, "MSI-X is not attainable  rc %d\n", rc);
                return rc;
        }
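
The reworked fallback leans on the contract of the old pci_enable_msix()
(removed in later kernels): zero means all requested vectors were granted, a
negative value is a hard error, and a positive value is how many vectors are
actually available, inviting a retry with the smaller count. A sketch of that
shape (wrapper hypothetical):

    #include <linux/pci.h>

    static int request_msix_sketch(struct pci_dev *pdev,
                                   struct msix_entry *tbl, int req)
    {
            int rc = pci_enable_msix(pdev, tbl, req);

            if (rc > 0)             /* only 'rc' vectors available: retry */
                    rc = pci_enable_msix(pdev, tbl, rc);

            return rc;              /* 0 on success, -errno otherwise */
    }
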
@@ -1083,7 +1058,7 @@ static int bnx2x_req_msix_irqs(struct bnx2x *bp)
                snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
                         bp->dev->name, i);
 
-               rc = request_irq(bp->msix_table[i + offset].vector,
+               rc = request_irq(bp->msix_table[offset].vector,
                                 bnx2x_msix_fp_int, 0, fp->name, fp);
                if (rc) {
                        BNX2X_ERR("request fp #%d irq failed  rc %d\n", i, rc);
@@ -1091,10 +1066,12 @@ static int bnx2x_req_msix_irqs(struct bnx2x *bp)
                        return -EBUSY;
                }
 
+               offset++;
                fp->state = BNX2X_FP_STATE_IRQ;
        }
 
        i = BNX2X_NUM_QUEUES(bp);
+       offset = 1 + CNIC_CONTEXT_USE;
        netdev_info(bp->dev, "using MSI-X  IRQs: sp %d  fp[%d] %d"
               " ... fp[%d] %d\n",
               bp->msix_table[0].vector,
@@ -1104,7 +1081,7 @@ static int bnx2x_req_msix_irqs(struct bnx2x *bp)
        return 0;
 }
 
-static int bnx2x_enable_msi(struct bnx2x *bp)
+int bnx2x_enable_msi(struct bnx2x *bp)
 {
        int rc;
 
@@ -1175,44 +1152,21 @@ void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
        bnx2x_napi_disable(bp);
        netif_tx_disable(bp->dev);
 }
-static int bnx2x_set_num_queues(struct bnx2x *bp)
-{
-       int rc = 0;
 
-       switch (bp->int_mode) {
-       case INT_MODE_MSI:
-               bnx2x_enable_msi(bp);
-               /* falling through... */
-       case INT_MODE_INTx:
+void bnx2x_set_num_queues(struct bnx2x *bp)
+{
+       switch (bp->multi_mode) {
+       case ETH_RSS_MODE_DISABLED:
                bp->num_queues = 1;
-               DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
                break;
-       default:
-               /* Set number of queues according to bp->multi_mode value */
-               bnx2x_set_num_queues_msix(bp);
-
-               DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
-                  bp->num_queues);
-
-               /* if we can't use MSI-X we only need one fp,
-                * so try to enable MSI-X with the requested number of fp's
-                * and fallback to MSI or legacy INTx with one fp
-                */
-               rc = bnx2x_enable_msix(bp);
-               if (rc) {
-                       /* failed to enable MSI-X */
-                       bp->num_queues = 1;
-
-                       /* Fall to INTx if failed to enable MSI-X due to lack of
-                        * memory (in bnx2x_set_num_queues()) */
-                       if ((rc != -ENOMEM) && (bp->int_mode != INT_MODE_INTx))
-                               bnx2x_enable_msi(bp);
-               }
+       case ETH_RSS_MODE_REGULAR:
+               bp->num_queues = bnx2x_calc_num_queues(bp);
+               break;
 
+       default:
+               bp->num_queues = 1;
                break;
        }
-       netif_set_real_num_tx_queues(bp->dev, bp->num_queues);
-       return netif_set_real_num_rx_queues(bp->dev, bp->num_queues);
 }
 
 static void bnx2x_release_firmware(struct bnx2x *bp)
@@ -1243,49 +1197,25 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 
        bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
 
-       rc = bnx2x_set_num_queues(bp);
-       if (rc)
-               return rc;
-
        /* must be called before memory allocation and HW init */
        bnx2x_ilt_set_info(bp);
 
-       if (bnx2x_alloc_mem(bp)) {
-               bnx2x_free_irq(bp, true);
+       if (bnx2x_alloc_mem(bp))
                return -ENOMEM;
+
+       netif_set_real_num_tx_queues(bp->dev, bp->num_queues);
+       rc = netif_set_real_num_rx_queues(bp->dev, bp->num_queues);
+       if (rc) {
+               BNX2X_ERR("Unable to update real_num_rx_queues\n");
+               goto load_error0;
        }
 
        for_each_queue(bp, i)
                bnx2x_fp(bp, i, disable_tpa) =
                                        ((bp->flags & TPA_ENABLE_FLAG) == 0);
 
-       for_each_queue(bp, i)
-               netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
-                              bnx2x_poll, 128);
-
        bnx2x_napi_enable(bp);
 
-       if (bp->flags & USING_MSIX_FLAG) {
-               rc = bnx2x_req_msix_irqs(bp);
-               if (rc) {
-                       bnx2x_free_irq(bp, true);
-                       goto load_error1;
-               }
-       } else {
-               bnx2x_ack_int(bp);
-               rc = bnx2x_req_irq(bp);
-               if (rc) {
-                       BNX2X_ERR("IRQ request failed  rc %d, aborting\n", rc);
-                       bnx2x_free_irq(bp, true);
-                       goto load_error1;
-               }
-               if (bp->flags & USING_MSI_FLAG) {
-                       bp->dev->irq = bp->pdev->irq;
-                       netdev_info(bp->dev, "using MSI  IRQ %d\n",
-                                   bp->pdev->irq);
-               }
-       }
-
        /* Send LOAD_REQUEST command to MCP
           Returns the type of LOAD command:
           if it is the first port to be initialized
@@ -1296,11 +1226,11 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
                if (!load_code) {
                        BNX2X_ERR("MCP response failure, aborting\n");
                        rc = -EBUSY;
-                       goto load_error2;
+                       goto load_error1;
                }
                if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
                        rc = -EBUSY; /* other port in diagnostic mode */
-                       goto load_error2;
+                       goto load_error1;
                }
 
        } else {
@@ -1336,11 +1266,11 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
        if (rc) {
                BNX2X_ERR("HW init failed, aborting\n");
                bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
-               bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
-               bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
                goto load_error2;
        }
 
+       /* Connect to IRQs */
+       rc = bnx2x_setup_irqs(bp);
        if (rc) {
                bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
                goto load_error2;
@@ -1400,6 +1330,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
        /* Enable Timer scan */
        REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
 #endif
+
        for_each_nondefault_queue(bp, i) {
                rc = bnx2x_setup_client(bp, &bp->fp[i], 0);
                if (rc)
@@ -1415,19 +1346,6 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 
        bnx2x_set_eth_mac(bp, 1);
 
-#ifdef BCM_CNIC
-       /* Set iSCSI L2 MAC */
-       mutex_lock(&bp->cnic_mutex);
-       if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD) {
-               bnx2x_set_iscsi_eth_mac_addr(bp, 1);
-               bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
-               bnx2x_init_sb(bp, bp->cnic_sb_mapping,
-                             BNX2X_VF_ID_INVALID, false,
-                             CNIC_SB_ID(bp), CNIC_IGU_SB_ID(bp));
-       }
-       mutex_unlock(&bp->cnic_mutex);
-#endif
-
        if (bp->port.pmf)
                bnx2x_initial_phy_init(bp, load_mode);
 
@@ -1481,22 +1399,24 @@ load_error4:
 #endif
 load_error3:
        bnx2x_int_disable_sync(bp, 1);
-       if (!BP_NOMCP(bp)) {
-               bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
-               bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
-       }
-       bp->port.pmf = 0;
+
        /* Free SKBs, SGEs, TPA pool and driver internals */
        bnx2x_free_skbs(bp);
        for_each_queue(bp, i)
                bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
-load_error2:
+
        /* Release IRQs */
-       bnx2x_free_irq(bp, false);
+       bnx2x_free_irq(bp);
+load_error2:
+       if (!BP_NOMCP(bp)) {
+               bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
+               bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
+       }
+
+       bp->port.pmf = 0;
 load_error1:
        bnx2x_napi_disable(bp);
-       for_each_queue(bp, i)
-               netif_napi_del(&bnx2x_fp(bp, i, napi));
+load_error0:
        bnx2x_free_mem(bp);
 
        bnx2x_release_firmware(bp);
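
The relocated load_error0..load_error3 labels keep to the usual kernel unwind
discipline: each failure site jumps to the label that releases exactly what
has been acquired so far, and the labels fall through in reverse order of
acquisition. Schematically (all names hypothetical):

    static int nic_load_sketch(struct my_nic *nic)
    {
            int rc;

            rc = alloc_mem(nic);
            if (rc)
                    return rc;
            rc = start_napi(nic);           /* acquired after memory */
            if (rc)
                    goto err_mem;
            rc = setup_irqs(nic);           /* acquired last */
            if (rc)
                    goto err_napi;
            return 0;

    err_napi:
            stop_napi(nic);                 /* release in reverse order */
    err_mem:
            free_mem(nic);
            return rc;
    }
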
@@ -1530,11 +1450,13 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
 
        /* Stop Tx */
        bnx2x_tx_disable(bp);
+
        del_timer_sync(&bp->timer);
+
        SHMEM_WR(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb,
                 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
-       bnx2x_stats_handle(bp, STATS_EVENT_STOP);
 
+       bnx2x_stats_handle(bp, STATS_EVENT_STOP);
 
        /* Cleanup the chip if needed */
        if (unload_mode != UNLOAD_RECOVERY)
@@ -1544,7 +1466,7 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
                bnx2x_netif_stop(bp, 1);
 
                /* Release IRQs */
-               bnx2x_free_irq(bp, false);
+               bnx2x_free_irq(bp);
        }
 
        bp->port.pmf = 0;
@@ -1553,8 +1475,7 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
        bnx2x_free_skbs(bp);
        for_each_queue(bp, i)
                bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
-       for_each_queue(bp, i)
-               netif_napi_del(&bnx2x_fp(bp, i, napi));
+
        bnx2x_free_mem(bp);
 
        bp->state = BNX2X_STATE_CLOSED;
@@ -1572,10 +1493,17 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
 
        return 0;
 }
+
 int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
 {
        u16 pmcsr;
 
+       /* If there is no power capability, silently succeed */
+       if (!bp->pm_cap) {
+               DP(NETIF_MSG_HW, "No power capability. Breaking.\n");
+               return 0;
+       }
+
        pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
 
        switch (state) {
@@ -1618,13 +1546,10 @@ int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
        return 0;
 }
 
-
-
 /*
  * net_device service functions
  */
-
-static int bnx2x_poll(struct napi_struct *napi, int budget)
+int bnx2x_poll(struct napi_struct *napi, int budget)
 {
        int work_done = 0;
        struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
@@ -1653,19 +1578,19 @@ static int bnx2x_poll(struct napi_struct *napi, int budget)
                /* Fall out from the NAPI loop if needed */
                if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
                        bnx2x_update_fpsb_idx(fp);
-               /* bnx2x_has_rx_work() reads the status block,
-                * thus we need to ensure that status block indices
-                * have been actually read (bnx2x_update_fpsb_idx)
-                * prior to this check (bnx2x_has_rx_work) so that
-                * we won't write the "newer" value of the status block
-                * to IGU (if there was a DMA right after
-                * bnx2x_has_rx_work and if there is no rmb, the memory
-                * reading (bnx2x_update_fpsb_idx) may be postponed
-                * to right before bnx2x_ack_sb). In this case there
-                * will never be another interrupt until there is
-                * another update of the status block, while there
-                * is still unhandled work.
-                */
+                       /* bnx2x_has_rx_work() reads the status block,
+                        * thus we need to ensure that status block indices
+                        * have been actually read (bnx2x_update_fpsb_idx)
+                        * prior to this check (bnx2x_has_rx_work) so that
+                        * we won't write the "newer" value of the status block
+                        * to IGU (if there was a DMA right after
+                        * bnx2x_has_rx_work and if there is no rmb, the memory
+                        * reading (bnx2x_update_fpsb_idx) may be postponed
+                        * to right before bnx2x_ack_sb). In this case there
+                        * will never be another interrupt until there is
+                        * another update of the status block, while there
+                        * is still unhandled work.
+                        */
                        rmb();
 
                        if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
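
The block comment above (re-indented by this hunk) documents the poll-exit
protocol: refresh the cached status-block indices, order that read with
rmb(), and only then re-test for work before writing the index back to the
IGU. Stripped to its shape (helpers hypothetical):

    static int poll_exit_sketch(struct napi_struct *napi, struct my_fp *fp)
    {
            if (!has_work(fp)) {
                    update_sb_idx(fp);      /* read indices from the SB */
                    rmb();                  /* order the read vs. re-check */
                    if (!has_work(fp)) {
                            napi_complete(napi);
                            ack_sb(fp);     /* write index to IGU, re-arm */
                            return 0;
                    }
            }
            return 1;                       /* more work: keep polling */
    }
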
@@ -1684,7 +1609,6 @@ static int bnx2x_poll(struct napi_struct *napi, int budget)
        return work_done;
 }
 
-
 /* we split the first BD into headers and data BDs
  * to ease the pain of our fellow microcode engineers
  * we use one mapping for both BDs
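
The comment opening this hunk describes the transmit-side trick used in
bnx2x_start_xmit(): the skb's linear area is mapped once, and the header BD
and data BD simply address different offsets inside that single mapping.
Schematically (the BD locals are hypothetical; U64_HI/U64_LO are the driver's
own macros):

    /* One dma_map_single() of the linear data backs both BDs: the header
     * BD covers [mapping, mapping + hlen) and the data BD covers
     * [mapping + hlen, mapping + skb_headlen(skb)). */
    mapping = dma_map_single(&bp->pdev->dev, skb->data,
                             skb_headlen(skb), DMA_TO_DEVICE);

    hdr_bd->addr_hi  = cpu_to_le32(U64_HI(mapping));
    hdr_bd->addr_lo  = cpu_to_le32(U64_LO(mapping));
    hdr_bd->nbytes   = cpu_to_le16(hlen);

    data_bd->addr_hi = cpu_to_le32(U64_HI(mapping + hlen));
    data_bd->addr_lo = cpu_to_le32(U64_LO(mapping + hlen));
    data_bd->nbytes  = cpu_to_le16(skb_headlen(skb) - hlen);
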
@@ -1900,6 +1824,7 @@ static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
 
        pbd->global_data |= ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN;
 }
+
 /**
  *
  * @param skb
@@ -1972,6 +1897,7 @@ static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
 
        return hlen;
 }
+
 /* called with netif_tx_lock
  * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
  * netif_wake_queue()
@@ -2061,13 +1987,11 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
        tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
 
        tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
-       SET_FLAG(tx_start_bd->general_data,
-                 ETH_TX_START_BD_ETH_ADDR_TYPE,
-                 mac_type);
+       SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_ETH_ADDR_TYPE,
+                mac_type);
+
        /* header nbd */
-       SET_FLAG(tx_start_bd->general_data,
-                 ETH_TX_START_BD_HDR_NBDS,
-                 1);
+       SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1);
 
        /* remember the first BD of the packet */
        tx_buf->first_bd = fp->tx_bd_prod;
@@ -2078,15 +2002,12 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
           "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
           pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
 
-#ifdef BCM_VLAN
-       if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
-           (bp->flags & HW_VLAN_TX_FLAG)) {
+       if (vlan_tx_tag_present(skb)) {
                tx_start_bd->vlan_or_ethertype =
                    cpu_to_le16(vlan_tx_tag_get(skb));
                tx_start_bd->bd_flags.as_bitfield |=
                    (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
        } else
-#endif
                tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
 
        /* turn on parsing and get a BD */
@@ -2123,9 +2044,11 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
        }
 
+       /* Map skb linear data for DMA */
        mapping = dma_map_single(&bp->pdev->dev, skb->data,
                                 skb_headlen(skb), DMA_TO_DEVICE);
 
+       /* Setup the data pointer of the first BD of the packet */
        tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
        tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
        nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
@@ -2159,6 +2082,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
        }
        tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
 
+       /* Handle fragmented skb */
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 
@@ -2223,6 +2147,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
        fp->tx_db.data.prod += nbd;
        barrier();
+
        DOORBELL(bp, fp->cid, fp->tx_db.raw);
 
        mmiowb();
@@ -2245,6 +2170,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
        return NETDEV_TX_OK;
 }
+
 /* called with rtnl_lock */
 int bnx2x_change_mac_addr(struct net_device *dev, void *p)
 {
@@ -2261,6 +2187,31 @@ int bnx2x_change_mac_addr(struct net_device *dev, void *p)
        return 0;
 }
 
+
+static int bnx2x_setup_irqs(struct bnx2x *bp)
+{
+       int rc = 0;
+       if (bp->flags & USING_MSIX_FLAG) {
+               rc = bnx2x_req_msix_irqs(bp);
+               if (rc)
+                       return rc;
+       } else {
+               bnx2x_ack_int(bp);
+               rc = bnx2x_req_irq(bp);
+               if (rc) {
+                       BNX2X_ERR("IRQ request failed  rc %d, aborting\n", rc);
+                       return rc;
+               }
+               if (bp->flags & USING_MSI_FLAG) {
+                       bp->dev->irq = bp->pdev->irq;
+                       netdev_info(bp->dev, "using MSI  IRQ %d\n",
+                              bp->pdev->irq);
+               }
+       }
+
+       return 0;
+}
+
 void bnx2x_free_mem_bp(struct bnx2x *bp)
 {
        kfree(bp->fp);
@@ -2341,17 +2292,6 @@ void bnx2x_tx_timeout(struct net_device *dev)
        schedule_delayed_work(&bp->reset_task, 0);
 }
 
-#ifdef BCM_VLAN
-/* called with rtnl_lock */
-void bnx2x_vlan_rx_register(struct net_device *dev,
-                                  struct vlan_group *vlgrp)
-{
-       struct bnx2x *bp = netdev_priv(dev);
-
-       bp->vlgrp = vlgrp;
-}
-
-#endif
 int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
 {
        struct net_device *dev = pci_get_drvdata(pdev);