#include "bna.h"
#include "cna.h"
-DEFINE_MUTEX(bnad_fwimg_mutex);
+static DEFINE_MUTEX(bnad_fwimg_mutex);
/*
* Module params
*/
u32 bnad_rxqs_per_cq = 2;
-const u8 bnad_bcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+static const u8 bnad_bcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
/*
* Local MACROS
(flags & BNA_CQ_EF_L4_CKSUM_OK)))
skb->ip_summed = CHECKSUM_UNNECESSARY;
else
- skb->ip_summed = CHECKSUM_NONE;
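+ /* Fresh skbs are already CHECKSUM_NONE; assert that
+ * (verified under DEBUG) rather than re-assigning it.
+ */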
+ skb_checksum_none_assert(skb);
rcb->rxq->rx_packets++;
rcb->rxq->rx_bytes += skb->len;
static void
bnad_enable_rx_irq(struct bnad *bnad, struct bna_ccb *ccb)
{
- spin_lock_irq(&bnad->bna_lock); /* Because of polling context */
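+ /*
+ * Use the irqsave variant: the poll path gives no guarantee
+ * about the caller's hard-IRQ state, and spin_unlock_irq()
+ * would unconditionally re-enable interrupts.
+ */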
+ unsigned long flags;
+
+ spin_lock_irqsave(&bnad->bna_lock, flags); /* Because of polling context */
bnad_enable_rx_irq_unsafe(ccb);
- spin_unlock_irq(&bnad->bna_lock);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
}
static irqreturn_t
bnad_msix_mbox_handler(int irq, void *data)
{
u32 intr_status;
- unsigned long  flags;
+ unsigned long flags;
struct net_device *netdev = data;
struct bnad *bnad;
struct bnad_rx_info *rx_info;
struct bnad_rx_ctrl *rx_ctrl;
- spin_lock_irqsave(&bnad->bna_lock, flags);
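+ /*
+ * On a SHARED line this handler can run while the mailbox IRQ
+ * is administratively disabled, and spurious invocations can
+ * see no pending status; bail out in both cases before taking
+ * bna_lock, so the lock is held only when there is real work.
+ */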
+ if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags)))
+ return IRQ_NONE;
bna_intr_status_get(&bnad->bna, intr_status);
- if (!intr_status) {
- spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ if (unlikely(!intr_status))
return IRQ_NONE;
- }
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
if (BNA_IS_MBOX_ERR_INTR(intr_status)) {
bna_mbox_handler(&bnad->bna, intr_status);
{
int irq = BNAD_GET_MBOX_IRQ(bnad);
- if (!(bnad->cfg_flags & BNAD_CF_MSIX))
- return;
-
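+ /*
+ * Clear the disabled bit in both MSIX and INTx modes so the
+ * flag tracks state consistently; only MSIX has a dedicated
+ * mailbox vector to unmask.
+ */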
if (test_and_clear_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))
- enable_irq(irq);
+ if (bnad->cfg_flags & BNAD_CF_MSIX)
+ enable_irq(irq);
+
BNAD_UPDATE_CTR(bnad, mbox_intr_enabled);
}
* Called with bnad->bna_lock held because of
* bnad->cfg_flags access.
*/
-void
+static void
bnad_disable_mbox_irq(struct bnad *bnad)
{
int irq = BNAD_GET_MBOX_IRQ(bnad);
- if (!(bnad->cfg_flags & BNAD_CF_MSIX))
- return;
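+ /*
+ * Mirror of bnad_enable_mbox_irq(): set the disabled bit
+ * unconditionally, mask the vector only in MSIX mode.
+ */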
if (!test_and_set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))
- disable_irq_nosync(irq);
+ if (bnad->cfg_flags & BNAD_CF_MSIX)
+ disable_irq_nosync(irq);
+
BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
}
static void
bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tcb *tcb)
{
- struct bnad_unmap_q *unmap_q = tcb->unmap_q;
+ struct bnad_unmap_q *unmap_q;
if (!tcb || (!tcb->unmap_q))
return;
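+ /* Dereference tcb->unmap_q only after the NULL checks above */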
+ unmap_q = tcb->unmap_q;
if (!unmap_q->unmap_array)
return;
jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
}
-void
-bnad_cb_stats_clr(struct bnad *bnad)
-{
-}
-
/* Resource allocation, free functions */
static void
return;
spin_lock_irqsave(&bnad->bna_lock, flags);
-
bnad_disable_mbox_irq(bnad);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
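+ /* free_irq() can sleep, so it must not run under bna_lock */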
irq = BNAD_GET_MBOX_IRQ(bnad);
free_irq(irq, bnad->netdev);
- spin_unlock_irqrestore(&bnad->bna_lock, flags);
-
kfree(intr_info->idl);
}
sprintf(bnad->mbox_irq_name, "%s", BNAD_NAME);
+ /*
+ * Set the Mbox IRQ disable flag, so that the IRQ handler
+ * called from request_irq() for SHARED IRQs does not execute
+ */
+ set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
+
err = request_irq(irq, irq_handler, flags,
bnad->mbox_irq_name, bnad->netdev);
+
if (err) {
kfree(intr_info->idl);
intr_info->idl = NULL;
}
spin_lock_irqsave(&bnad->bna_lock, flags);
- bnad_disable_mbox_irq(bnad);
+
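+ /*
+ * BNAD_RF_MBOX_IRQ_DISABLED was already set before request_irq(),
+ * so bnad_disable_mbox_irq() would find the bit set and skip the
+ * hardware mask; disable the vector directly instead.
+ */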
+ if (bnad->cfg_flags & BNAD_CF_MSIX)
+ disable_irq_nosync(irq);
+
spin_unlock_irqrestore(&bnad->bna_lock, flags);
return 0;
}
jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
}
spin_unlock_irqrestore(&bnad->bna_lock, flags);
-
}
/*
spin_unlock_irqrestore(&bnad->bna_lock, flags);
wait_for_completion(&bnad->bnad_completions.ioc_comp);
-
}
static void
bnad_enable_msix(struct bnad *bnad)
{
int i, ret;
- u32 tot_msix_num;
unsigned long flags;
spin_lock_irqsave(&bnad->bna_lock, flags);
if (bnad->msix_table)
return;
- tot_msix_num = bnad->msix_num + bnad->msix_diag_num;
-
bnad->msix_table =
- kcalloc(tot_msix_num, sizeof(struct msix_entry), GFP_KERNEL);
+ kcalloc(bnad->msix_num, sizeof(struct msix_entry), GFP_KERNEL);
if (!bnad->msix_table)
goto intx_mode;
- for (i = 0; i < tot_msix_num; i++)
+ for (i = 0; i < bnad->msix_num; i++)
bnad->msix_table[i].entry = i;
- ret = pci_enable_msix(bnad->pcidev, bnad->msix_table, tot_msix_num);
+ ret = pci_enable_msix(bnad->pcidev, bnad->msix_table, bnad->msix_num);
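+ /*
+ * pci_enable_msix() returns > 0 when only that many vectors
+ * are available; retry once with a trimmed request before
+ * falling back to INTx.
+ */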
if (ret > 0) {
/* Not enough MSI-X vectors. */
+ (bnad->num_rx
* bnad->num_rxp_per_rx) +
BNAD_MAILBOX_MSIX_VECTORS;
- tot_msix_num = bnad->msix_num + bnad->msix_diag_num;
/* Try once more with adjusted numbers */
/* If this fails, fall back to INTx */
ret = pci_enable_msix(bnad->pcidev, bnad->msix_table,
- tot_msix_num);
+ bnad->msix_num);
if (ret)
goto intx_mode;
kfree(bnad->msix_table);
bnad->msix_table = NULL;
bnad->msix_num = 0;
- bnad->msix_diag_num = 0;
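+ /* INTx fallback: drop the MSIX flag and re-derive queue counts */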
spin_lock_irqsave(&bnad->bna_lock, flags);
bnad->cfg_flags &= ~BNAD_CF_MSIX;
bnad_q_num_init(bnad);
htons((skb_is_gso(skb) ? BNA_TXQ_WI_SEND_LSO :
BNA_TXQ_WI_SEND));
- if (bnad->vlan_grp && vlan_tx_tag_present(skb)) {
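+ /*
+ * Insert the tag whenever the skb carries one; gating on
+ * bnad->vlan_grp dropped valid tags when no VLAN group was
+ * registered yet.
+ */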
+ if (vlan_tx_tag_present(skb)) {
vlan_tag = (u16) vlan_tx_tag_get(skb);
flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
}
kzalloc((mc_count + 1) * ETH_ALEN,
GFP_ATOMIC);
if (!mcaddr_list)
- return;
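+ /* bna_lock is held here; a plain return would leak the lock */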
+ goto unlock;
memcpy(&mcaddr_list[0], &bnad_bcast_addr[0], ETH_ALEN);
/* Should we enable BNAD_CF_ALLMULTI for err != 0 ? */
kfree(mcaddr_list);
}
+unlock:
spin_unlock_irqrestore(&bnad->bna_lock, flags);
}
bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx) +
(bnad->num_rx * bnad->num_rxp_per_rx) +
BNAD_MAILBOX_MSIX_VECTORS;
- bnad->msix_diag_num = 2; /* 1 for Tx, 1 for Rx */
bnad->txq_depth = BNAD_TXQ_DEPTH;
bnad->rxq_depth = BNAD_RXQ_DEPTH;
spin_lock_irqsave(&bnad->bna_lock, flags);
bna_init(bna, bnad, &pcidev_info, &bnad->res_info[0]);
-
spin_unlock_irqrestore(&bnad->bna_lock, flags);
bnad->stats.bna_stats = &bna->stats;
free_netdev(netdev);
}
-const struct pci_device_id bnad_pci_id_table[] = {
+static const struct pci_device_id bnad_pci_id_table[] = {
{
PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
PCI_DEVICE_ID_BROCADE_CT),