for (j = 0; j < bp->rx_max_pg_ring; j++) {
if (rxr->rx_pg_desc_ring[j])
pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
- rxr->rx_pg_desc_ring[i],
- rxr->rx_pg_desc_mapping[i]);
- rxr->rx_pg_desc_ring[i] = NULL;
+ rxr->rx_pg_desc_ring[j],
+ rxr->rx_pg_desc_mapping[j]);
+ rxr->rx_pg_desc_ring[j] = NULL;
}
if (rxr->rx_pg_ring)
vfree(rxr->rx_pg_ring);
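For context, this hunk is in the driver's RX page-ring teardown (bnx2_free_rx_mem()). The inner loop iterates with j, but the original code freed and NULLed slot i, the outer ring index, so at most one page descriptor ring per pass was released and the rest leaked. Below is a minimal user-space sketch of the corrected pattern, with malloc()/free() standing in for pci_alloc_consistent()/pci_free_consistent() and made-up ring counts and sizes:

#include <stdio.h>
#include <stdlib.h>

#define NUM_RX_RINGS   4      /* hypothetical: number of RX rings (outer loop) */
#define MAX_PG_RING    2      /* hypothetical: bp->rx_max_pg_ring */
#define RXBD_RING_SIZE 4096   /* hypothetical: descriptor ring size in bytes */

struct rx_ring_info {
	void *rx_pg_desc_ring[MAX_PG_RING];
};

static void free_rx_pg_rings(struct rx_ring_info *rings)
{
	int i, j;

	for (i = 0; i < NUM_RX_RINGS; i++) {
		struct rx_ring_info *rxr = &rings[i];

		/* Index with the inner variable j; indexing with i here
		 * frees the wrong slot and leaks the others. */
		for (j = 0; j < MAX_PG_RING; j++) {
			free(rxr->rx_pg_desc_ring[j]);
			rxr->rx_pg_desc_ring[j] = NULL;
		}
	}
}

int main(void)
{
	struct rx_ring_info rings[NUM_RX_RINGS] = { 0 };
	int i, j;

	for (i = 0; i < NUM_RX_RINGS; i++)
		for (j = 0; j < MAX_PG_RING; j++)
			rings[i].rx_pg_desc_ring[j] = malloc(RXBD_RING_SIZE);

	free_rx_pg_rings(rings);
	printf("all page descriptor rings freed\n");
	return 0;
}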
static irqreturn_t
bnx2_msi(int irq, void *dev_instance)
{
struct bnx2_napi *bnapi = dev_instance;
struct bnx2 *bp = bnapi->bp;
- struct net_device *dev = bp->dev;
prefetch(bnapi->status_blk.msi);
REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
	BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
if (unlikely(atomic_read(&bp->intr_sem) != 0))
return IRQ_HANDLED;
- netif_rx_schedule(dev, &bnapi->napi);
+ netif_rx_schedule(&bnapi->napi);
return IRQ_HANDLED;
}
static irqreturn_t
bnx2_msi_1shot(int irq, void *dev_instance)
{
struct bnx2_napi *bnapi = dev_instance;
struct bnx2 *bp = bnapi->bp;
- struct net_device *dev = bp->dev;
prefetch(bnapi->status_blk.msi);
if (unlikely(atomic_read(&bp->intr_sem) != 0))
return IRQ_HANDLED;
- netif_rx_schedule(dev, &bnapi->napi);
+ netif_rx_schedule(&bnapi->napi);
return IRQ_HANDLED;
}
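These interrupt-handler hunks adapt bnx2 to the NAPI interface change that dropped the struct net_device argument from netif_rx_schedule() and its helpers; the argument was unused by those functions, so each handler's local dev pointer becomes dead code and is removed along with the call-site argument. A small user-space sketch of the same kind of refactor, using hypothetical names (napi_context, schedule_poll) rather than kernel APIs:

#include <stdio.h>

struct napi_context {
	const char *label;   /* stands in for the per-vector NAPI instance */
	int scheduled;
};

/* Old prototype, for comparison:
 *   void schedule_poll(struct net_device *dev, struct napi_context *napi);
 * The dev argument was never referenced, so it is dropped. */
static void schedule_poll(struct napi_context *napi)
{
	napi->scheduled = 1;
	printf("%s: poll scheduled\n", napi->label);
}

int main(void)
{
	struct napi_context bnapi = { .label = "vector 0", .scheduled = 0 };

	/* Handler body: no 'struct net_device *dev = bp->dev;' needed. */
	schedule_poll(&bnapi);
	return 0;
}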
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
struct bnx2_napi *bnapi = dev_instance;
struct bnx2 *bp = bnapi->bp;
- struct net_device *dev = bp->dev;
struct status_block *sblk = bnapi->status_blk.msi;
/* When using INTx, it is possible for the interrupt to arrive
 * at the CPU before the status block posted prior to the
 * interrupt.  Reading a register will flush the status block.
 * When using MSI, the MSI message will always complete after
 * the status block write.
 */
if (unlikely(atomic_read(&bp->intr_sem) != 0))
return IRQ_HANDLED;
- if (netif_rx_schedule_prep(dev, &bnapi->napi)) {
+ if (netif_rx_schedule_prep(&bnapi->napi)) {
bnapi->last_status_idx = sblk->status_idx;
- __netif_rx_schedule(dev, &bnapi->napi);
+ __netif_rx_schedule(&bnapi->napi);
}
return IRQ_HANDLED;
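The INTx handler keeps the two-step netif_rx_schedule_prep()/__netif_rx_schedule() sequence, now with napi-only arguments: prep claims the NAPI context, and only the claimant records last_status_idx and queues the poll. Below is a rough user-space model of that claim-then-queue pattern, using an atomic test-and-set flag in place of the NAPI state bit; poll_schedule_prep(), poll_schedule(), and poll_complete() are made-up stand-ins for the prep, schedule, and netif_rx_complete() steps:

#include <stdio.h>
#include <stdbool.h>
#include <stdatomic.h>

struct napi_model {
	atomic_flag scheduled;         /* set while a poll is pending/running */
	unsigned int last_status_idx;
};

/* Analogue of netif_rx_schedule_prep(): true only for the caller that
 * actually claimed the context. */
static bool poll_schedule_prep(struct napi_model *napi)
{
	return !atomic_flag_test_and_set(&napi->scheduled);
}

/* Analogue of __netif_rx_schedule(): queue the poll (here, just report). */
static void poll_schedule(struct napi_model *napi)
{
	printf("poll queued, last_status_idx=%u\n", napi->last_status_idx);
}

/* Analogue of netif_rx_complete(): reopen the context for scheduling. */
static void poll_complete(struct napi_model *napi)
{
	atomic_flag_clear(&napi->scheduled);
}

int main(void)
{
	struct napi_model napi = { .scheduled = ATOMIC_FLAG_INIT, .last_status_idx = 0 };

	/* First "interrupt" wins the claim and schedules the poll. */
	if (poll_schedule_prep(&napi)) {
		napi.last_status_idx = 42;   /* made-up status index */
		poll_schedule(&napi);
	}

	/* A second "interrupt" before completion is a no-op. */
	if (poll_schedule_prep(&napi))
		poll_schedule(&napi);

	poll_complete(&napi);
	return 0;
}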
rmb();
if (likely(!bnx2_has_fast_work(bnapi))) {
- netif_rx_complete(bp->dev, napi);
+ netif_rx_complete(napi);
REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
bnapi->last_status_idx);
rmb();
if (likely(!bnx2_has_work(bnapi))) {
- netif_rx_complete(bp->dev, napi);
+ netif_rx_complete(napi);
if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |