2 * Linux network driver for Brocade Converged Network Adapter.
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License (GPL) Version 2 as
6 * published by the Free Software Foundation
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
14 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
18 #include <linux/netdevice.h>
19 #include <linux/skbuff.h>
20 #include <linux/etherdevice.h>
22 #include <linux/ethtool.h>
23 #include <linux/if_vlan.h>
24 #include <linux/if_ether.h>
31 DEFINE_MUTEX(bnad_fwimg_mutex);
36 static uint bnad_msix_disable;
37 module_param(bnad_msix_disable, uint, 0444);
38 MODULE_PARM_DESC(bnad_msix_disable, "Disable MSIX mode");
40 static uint bnad_ioc_auto_recover = 1;
41 module_param(bnad_ioc_auto_recover, uint, 0444);
42 MODULE_PARM_DESC(bnad_ioc_auto_recover, "Enable / Disable auto recovery");
47 u32 bnad_rxqs_per_cq = 2;
49 const u8 bnad_bcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
54 #define BNAD_TX_UNMAPQ_DEPTH (bnad->txq_depth * 2)
56 #define BNAD_RX_UNMAPQ_DEPTH (bnad->rxq_depth)
58 #define BNAD_GET_MBOX_IRQ(_bnad) \
59 (((_bnad)->cfg_flags & BNAD_CF_MSIX) ? \
60 ((_bnad)->msix_table[(_bnad)->msix_num - 1].vector) : \
61 ((_bnad)->pcidev->irq))
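/*
 * By convention the last MSI-X table entry (msix_num - 1) is reserved
 * for the mailbox; bnad_mbox_irq_alloc() and bnad_mbox_irq_sync() below
 * rely on the same slot.
 */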
63 #define BNAD_FILL_UNMAPQ_MEM_REQ(_res_info, _num, _depth) \
65 (_res_info)->res_type = BNA_RES_T_MEM; \
66 (_res_info)->res_u.mem_info.mem_type = BNA_MEM_T_KVA; \
67 (_res_info)->res_u.mem_info.num = (_num); \
68 (_res_info)->res_u.mem_info.len = \
69 sizeof(struct bnad_unmap_q) + \
70 (sizeof(struct bnad_skb_unmap) * ((_depth) - 1)); \
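/*
 * A note on the sizing above: it assumes struct bnad_unmap_q ends in a
 * one-element unmap_array[] (old-style flexible array member), so one
 * struct bnad_skb_unmap is already counted in sizeof(struct bnad_unmap_q),
 * hence the (_depth - 1).
 */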
74 * Reinitialize completions in the CQ once Rx is taken down
77 bnad_cq_cmpl_init(struct bnad *bnad, struct bna_ccb *ccb)
79 struct bna_cq_entry *cmpl, *next_cmpl;
80 unsigned int wi_range, wis = 0, ccb_prod = 0;
83 BNA_CQ_QPGE_PTR_GET(ccb_prod, ccb->sw_qpt, cmpl,
86 for (i = 0; i < ccb->q_depth; i++) {
88 if (likely(--wi_range))
91 BNA_QE_INDX_ADD(ccb_prod, wis, ccb->q_depth);
93 BNA_CQ_QPGE_PTR_GET(ccb_prod, ccb->sw_qpt,
102 * Frees all pending Tx Bufs
103 * At this point no activity is expected on the Q,
104 * so DMA unmap & freeing is fine.
107 bnad_free_all_txbufs(struct bnad *bnad,
111 struct bnad_unmap_q *unmap_q = tcb->unmap_q;
112 struct bnad_skb_unmap *unmap_array;
113 struct sk_buff *skb = NULL;
116 unmap_array = unmap_q->unmap_array;
119 while (unmap_cons < unmap_q->q_depth) {
120 skb = unmap_array[unmap_cons].skb;
125 unmap_array[unmap_cons].skb = NULL;
127 pci_unmap_single(bnad->pcidev,
128 pci_unmap_addr(&unmap_array[unmap_cons],
129 dma_addr), skb_headlen(skb),
132 pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
134 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
135 pci_unmap_page(bnad->pcidev,
136 pci_unmap_addr(&unmap_array[unmap_cons],
138 skb_shinfo(skb)->frags[i].size,
140 pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
144 dev_kfree_skb_any(skb);
148 /* Data Path Handlers */
151 * bnad_free_txbufs : Frees the Tx bufs on Tx completion
152 * Can be called in a) Interrupt context
153 *                  b) Sending context
154 *                  c) Tasklet context
155 */
157 bnad_free_txbufs(struct bnad *bnad,
160 u32 sent_packets = 0, sent_bytes = 0;
161 u16 wis, unmap_cons, updated_hw_cons;
162 struct bnad_unmap_q *unmap_q = tcb->unmap_q;
163 struct bnad_skb_unmap *unmap_array;
168 * Just return if TX is stopped. This check is needed when
169 * bnad_free_txbufs() runs from a tasklet that was scheduled
170 * before bnad_cb_tx_cleanup() cleared the BNAD_RF_TX_STARTED bit,
171 * but actually executes after the cleanup has completed.
172 */
174 if (!test_bit(BNAD_RF_TX_STARTED, &bnad->run_flags))
177 updated_hw_cons = *(tcb->hw_consumer_index);
179 wis = BNA_Q_INDEX_CHANGE(tcb->consumer_index,
180 updated_hw_cons, tcb->q_depth);
182 BUG_ON(!(wis <= BNA_QE_IN_USE_CNT(tcb, tcb->q_depth)));
184 unmap_array = unmap_q->unmap_array;
185 unmap_cons = unmap_q->consumer_index;
187 prefetch(&unmap_array[unmap_cons + 1]);
189 skb = unmap_array[unmap_cons].skb;
191 unmap_array[unmap_cons].skb = NULL;
194 sent_bytes += skb->len;
195 wis -= BNA_TXQ_WI_NEEDED(1 + skb_shinfo(skb)->nr_frags);
197 pci_unmap_single(bnad->pcidev,
198 pci_unmap_addr(&unmap_array[unmap_cons],
199 dma_addr), skb_headlen(skb),
201 pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
202 BNA_QE_INDX_ADD(unmap_cons, 1, unmap_q->q_depth);
204 prefetch(&unmap_array[unmap_cons + 1]);
205 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
206 prefetch(&unmap_array[unmap_cons + 1]);
208 pci_unmap_page(bnad->pcidev,
209 pci_unmap_addr(&unmap_array[unmap_cons],
211 skb_shinfo(skb)->frags[i].size,
213 pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
215 BNA_QE_INDX_ADD(unmap_cons, 1, unmap_q->q_depth);
217 dev_kfree_skb_any(skb);
220 /* Update consumer pointers. */
221 tcb->consumer_index = updated_hw_cons;
222 unmap_q->consumer_index = unmap_cons;
224 tcb->txq->tx_packets += sent_packets;
225 tcb->txq->tx_bytes += sent_bytes;
230 /* Tx Free Tasklet function */
231 /* Frees Tx buffers for all the tcbs in all the Tx objects */
233 * Scheduled from sending context, so that
234 * the fat Tx lock is not held for too long
235 * in the sending context.
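 * A sketch of the assumed setup, done once at probe time:
 *	tasklet_init(&bnad->tx_free_tasklet, bnad_tx_free_tasklet,
 *		     (unsigned long)bnad);
 */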
238 bnad_tx_free_tasklet(unsigned long bnad_ptr)
240 struct bnad *bnad = (struct bnad *)bnad_ptr;
245 for (i = 0; i < bnad->num_tx; i++) {
246 for (j = 0; j < bnad->num_txq_per_tx; j++) {
247 tcb = bnad->tx_info[i].tcb[j];
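/*
 * BNAD_TXQ_FREE_SENT arbitrates between this tasklet, bnad_tx() and
 * bnad_start_xmit(), so only one context frees Tx buffers at a time.
 */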
250 if (((u16) (*tcb->hw_consumer_index) !=
251 tcb->consumer_index) &&
252 (!test_and_set_bit(BNAD_TXQ_FREE_SENT,
254 acked = bnad_free_txbufs(bnad, tcb);
255 bna_ib_ack(tcb->i_dbell, acked);
256 smp_mb__before_clear_bit();
257 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
264 bnad_tx(struct bnad *bnad, struct bna_tcb *tcb)
266 struct net_device *netdev = bnad->netdev;
269 if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
272 sent = bnad_free_txbufs(bnad, tcb);
274 if (netif_queue_stopped(netdev) &&
275 netif_carrier_ok(netdev) &&
276 BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
277 BNAD_NETIF_WAKE_THRESHOLD) {
278 netif_wake_queue(netdev);
279 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
281 bna_ib_ack(tcb->i_dbell, sent);
283 bna_ib_ack(tcb->i_dbell, 0);
285 smp_mb__before_clear_bit();
286 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
291 /* MSIX Tx Completion Handler */
293 bnad_msix_tx(int irq, void *data)
295 struct bna_tcb *tcb = (struct bna_tcb *)data;
296 struct bnad *bnad = tcb->bnad;
304 bnad_reset_rcb(struct bnad *bnad, struct bna_rcb *rcb)
306 struct bnad_unmap_q *unmap_q = rcb->unmap_q;
308 rcb->producer_index = 0;
309 rcb->consumer_index = 0;
311 unmap_q->producer_index = 0;
312 unmap_q->consumer_index = 0;
316 bnad_free_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
318 struct bnad_unmap_q *unmap_q;
321 unmap_q = rcb->unmap_q;
322 while (BNA_QE_IN_USE_CNT(unmap_q, unmap_q->q_depth)) {
323 skb = unmap_q->unmap_array[unmap_q->consumer_index].skb;
325 unmap_q->unmap_array[unmap_q->consumer_index].skb = NULL;
326 pci_unmap_single(bnad->pcidev, pci_unmap_addr(&unmap_q->
327 unmap_array[unmap_q->consumer_index],
328 dma_addr), rcb->rxq->buffer_size +
329 NET_IP_ALIGN, PCI_DMA_FROMDEVICE);
331 BNA_QE_INDX_ADD(unmap_q->consumer_index, 1, unmap_q->q_depth);
332 BNA_QE_INDX_ADD(rcb->consumer_index, 1, rcb->q_depth);
335 bnad_reset_rcb(bnad, rcb);
339 bnad_alloc_n_post_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
341 u16 to_alloc, alloced, unmap_prod, wi_range;
342 struct bnad_unmap_q *unmap_q = rcb->unmap_q;
343 struct bnad_skb_unmap *unmap_array;
344 struct bna_rxq_entry *rxent;
350 BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth);
352 unmap_array = unmap_q->unmap_array;
353 unmap_prod = unmap_q->producer_index;
355 BNA_RXQ_QPGE_PTR_GET(unmap_prod, rcb->sw_qpt, rxent, wi_range);
359 BNA_RXQ_QPGE_PTR_GET(unmap_prod, rcb->sw_qpt, rxent,
362 skb = alloc_skb(rcb->rxq->buffer_size + NET_IP_ALIGN,
364 if (unlikely(!skb)) {
365 BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed);
368 skb->dev = bnad->netdev;
369 skb_reserve(skb, NET_IP_ALIGN);
370 unmap_array[unmap_prod].skb = skb;
371 dma_addr = pci_map_single(bnad->pcidev, skb->data,
372 rcb->rxq->buffer_size, PCI_DMA_FROMDEVICE);
373 pci_unmap_addr_set(&unmap_array[unmap_prod], dma_addr,
375 BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
376 BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
384 if (likely(alloced)) {
385 unmap_q->producer_index = unmap_prod;
386 rcb->producer_index = unmap_prod;
388 bna_rxq_prod_indx_doorbell(rcb);
393 * Locking is required in the enable path
394 * because it is called from a napi poll
395 * context, where the bna_lock is not held,
396 * unlike in the IRQ context.
399 bnad_enable_txrx_irqs(struct bnad *bnad)
406 spin_lock_irqsave(&bnad->bna_lock, flags);
407 for (i = 0; i < bnad->num_tx; i++) {
408 for (j = 0; j < bnad->num_txq_per_tx; j++) {
409 tcb = bnad->tx_info[i].tcb[j];
410 bna_ib_coalescing_timer_set(tcb->i_dbell,
411 tcb->txq->ib->ib_config.coalescing_timeo);
412 bna_ib_ack(tcb->i_dbell, 0);
416 for (i = 0; i < bnad->num_rx; i++) {
417 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
418 ccb = bnad->rx_info[i].rx_ctrl[j].ccb;
419 bnad_enable_rx_irq_unsafe(ccb);
422 spin_unlock_irqrestore(&bnad->bna_lock, flags);
426 bnad_refill_rxq(struct bnad *bnad, struct bna_rcb *rcb)
428 struct bnad_unmap_q *unmap_q = rcb->unmap_q;
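/*
 * The refill test below is a threshold check: the shift makes it
 * non-zero only once at least 2^BNAD_RXQ_REFILL_THRESHOLD_SHIFT
 * entries are free, so buffers are posted in sizable batches.
 */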
430 if (!test_and_set_bit(BNAD_RXQ_REFILL, &rcb->flags)) {
431 if (BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth)
432 >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT)
433 bnad_alloc_n_post_rxbufs(bnad, rcb);
434 smp_mb__before_clear_bit();
435 clear_bit(BNAD_RXQ_REFILL, &rcb->flags);
440 bnad_poll_cq(struct bnad *bnad, struct bna_ccb *ccb, int budget)
442 struct bna_cq_entry *cmpl, *next_cmpl;
443 struct bna_rcb *rcb = NULL;
444 unsigned int wi_range, packets = 0, wis = 0;
445 struct bnad_unmap_q *unmap_q;
448 u32 qid0 = ccb->rcb[0]->rxq->rxq_id;
449 struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate;
451 prefetch(bnad->netdev);
452 BNA_CQ_QPGE_PTR_GET(ccb->producer_index, ccb->sw_qpt, cmpl,
454 BUG_ON(!(wi_range <= ccb->q_depth));
455 while (cmpl->valid && packets < budget) {
457 BNA_UPDATE_PKT_CNT(pkt_rt, ntohs(cmpl->length));
459 if (qid0 == cmpl->rxq_id)
464 unmap_q = rcb->unmap_q;
466 skb = unmap_q->unmap_array[unmap_q->consumer_index].skb;
468 unmap_q->unmap_array[unmap_q->consumer_index].skb = NULL;
469 pci_unmap_single(bnad->pcidev,
470 pci_unmap_addr(&unmap_q->
471 unmap_array[unmap_q->
474 rcb->rxq->buffer_size,
476 BNA_QE_INDX_ADD(unmap_q->consumer_index, 1, unmap_q->q_depth);
478 /* XXX: could this be done more efficiently? */
479 BNA_QE_INDX_ADD(rcb->consumer_index, 1, rcb->q_depth);
482 if (likely(--wi_range))
483 next_cmpl = cmpl + 1;
485 BNA_QE_INDX_ADD(ccb->producer_index, wis, ccb->q_depth);
487 BNA_CQ_QPGE_PTR_GET(ccb->producer_index, ccb->sw_qpt,
488 next_cmpl, wi_range);
489 BUG_ON(!(wi_range <= ccb->q_depth));
493 flags = ntohl(cmpl->flags);
496 (BNA_CQ_EF_MAC_ERROR | BNA_CQ_EF_FCS_ERROR |
497 BNA_CQ_EF_TOO_LONG))) {
498 dev_kfree_skb_any(skb);
499 rcb->rxq->rx_packets_with_error++;
503 skb_put(skb, ntohs(cmpl->length));
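/*
 * Mark the checksum verified only when the L4 checksum is good and,
 * for IPv4, the L3 checksum is good too (IPv6 has no header checksum).
 */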
506 (((flags & BNA_CQ_EF_IPV4) &&
507 (flags & BNA_CQ_EF_L3_CKSUM_OK)) ||
508 (flags & BNA_CQ_EF_IPV6)) &&
509 (flags & (BNA_CQ_EF_TCP | BNA_CQ_EF_UDP)) &&
510 (flags & BNA_CQ_EF_L4_CKSUM_OK)))
511 skb->ip_summed = CHECKSUM_UNNECESSARY;
513 skb_checksum_none_assert(skb);
515 rcb->rxq->rx_packets++;
516 rcb->rxq->rx_bytes += skb->len;
517 skb->protocol = eth_type_trans(skb, bnad->netdev);
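/* Use GRO only when the checksum has been verified; otherwise fall
 * back to the plain receive path.
 */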
519 if (bnad->vlan_grp && (flags & BNA_CQ_EF_VLAN)) {
520 struct bnad_rx_ctrl *rx_ctrl =
521 (struct bnad_rx_ctrl *)ccb->ctrl;
522 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
523 vlan_gro_receive(&rx_ctrl->napi, bnad->vlan_grp,
524 ntohs(cmpl->vlan_tag), skb);
526 vlan_hwaccel_receive_skb(skb,
528 ntohs(cmpl->vlan_tag));
530 } else { /* Not VLAN tagged/stripped */
531 struct bnad_rx_ctrl *rx_ctrl =
532 (struct bnad_rx_ctrl *)ccb->ctrl;
533 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
534 napi_gro_receive(&rx_ctrl->napi, skb);
536 netif_receive_skb(skb);
544 BNA_QE_INDX_ADD(ccb->producer_index, wis, ccb->q_depth);
547 bna_ib_ack(ccb->i_dbell, packets);
548 bnad_refill_rxq(bnad, ccb->rcb[0]);
550 bnad_refill_rxq(bnad, ccb->rcb[1]);
552 bna_ib_ack(ccb->i_dbell, 0);
558 bnad_disable_rx_irq(struct bnad *bnad, struct bna_ccb *ccb)
560 bna_ib_coalescing_timer_set(ccb->i_dbell, 0);
561 bna_ib_ack(ccb->i_dbell, 0);
565 bnad_enable_rx_irq(struct bnad *bnad, struct bna_ccb *ccb)
567 spin_lock_irq(&bnad->bna_lock); /* Because of polling context */
568 bnad_enable_rx_irq_unsafe(ccb);
569 spin_unlock_irq(&bnad->bna_lock);
573 bnad_netif_rx_schedule_poll(struct bnad *bnad, struct bna_ccb *ccb)
575 struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl);
576 if (likely(napi_schedule_prep((&rx_ctrl->napi)))) {
577 bnad_disable_rx_irq(bnad, ccb);
578 __napi_schedule((&rx_ctrl->napi));
580 BNAD_UPDATE_CTR(bnad, netif_rx_schedule);
583 /* MSIX Rx Path Handler */
585 bnad_msix_rx(int irq, void *data)
587 struct bna_ccb *ccb = (struct bna_ccb *)data;
588 struct bnad *bnad = ccb->bnad;
590 bnad_netif_rx_schedule_poll(bnad, ccb);
595 /* Interrupt handlers */
597 /* Mbox Interrupt Handlers */
599 bnad_msix_mbox_handler(int irq, void *data)
603 struct net_device *netdev = data;
606 bnad = netdev_priv(netdev);
608 /* BNA_ISR_GET(bnad); Inc Ref count */
609 spin_lock_irqsave(&bnad->bna_lock, flags);
611 bna_intr_status_get(&bnad->bna, intr_status);
613 if (BNA_IS_MBOX_ERR_INTR(intr_status))
614 bna_mbox_handler(&bnad->bna, intr_status);
616 spin_unlock_irqrestore(&bnad->bna_lock, flags);
618 /* BNAD_ISR_PUT(bnad); Dec Ref count */
623 bnad_isr(int irq, void *data)
628 struct net_device *netdev = data;
629 struct bnad *bnad = netdev_priv(netdev);
630 struct bnad_rx_info *rx_info;
631 struct bnad_rx_ctrl *rx_ctrl;
633 spin_lock_irqsave(&bnad->bna_lock, flags);
635 bna_intr_status_get(&bnad->bna, intr_status);
637 spin_unlock_irqrestore(&bnad->bna_lock, flags);
641 if (BNA_IS_MBOX_ERR_INTR(intr_status)) {
642 bna_mbox_handler(&bnad->bna, intr_status);
643 if (!BNA_IS_INTX_DATA_INTR(intr_status)) {
644 spin_unlock_irqrestore(&bnad->bna_lock, flags);
648 spin_unlock_irqrestore(&bnad->bna_lock, flags);
650 /* Process data interrupts */
651 for (i = 0; i < bnad->num_rx; i++) {
652 rx_info = &bnad->rx_info[i];
655 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
656 rx_ctrl = &rx_info->rx_ctrl[j];
658 bnad_netif_rx_schedule_poll(bnad,
667 * Called in interrupt / callback context
668 * with bna_lock held, so cfg_flags access is OK
671 bnad_enable_mbox_irq(struct bnad *bnad)
673 int irq = BNAD_GET_MBOX_IRQ(bnad);
675 if (!(bnad->cfg_flags & BNAD_CF_MSIX))
678 if (test_and_clear_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))
680 BNAD_UPDATE_CTR(bnad, mbox_intr_enabled);
684 * Called with bnad->bna_lock held because of
685 * bnad->cfg_flags access.
688 bnad_disable_mbox_irq(struct bnad *bnad)
690 int irq = BNAD_GET_MBOX_IRQ(bnad);
692 if (!(bnad->cfg_flags & BNAD_CF_MSIX))
695 if (!test_and_set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))
696 disable_irq_nosync(irq);
697 BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
700 /* Control Path Handlers */
704 bnad_cb_device_enable_mbox_intr(struct bnad *bnad)
706 bnad_enable_mbox_irq(bnad);
710 bnad_cb_device_disable_mbox_intr(struct bnad *bnad)
712 bnad_disable_mbox_irq(bnad);
716 bnad_cb_device_enabled(struct bnad *bnad, enum bna_cb_status status)
718 complete(&bnad->bnad_completions.ioc_comp);
719 bnad->bnad_completions.ioc_comp_status = status;
723 bnad_cb_device_disabled(struct bnad *bnad, enum bna_cb_status status)
725 complete(&bnad->bnad_completions.ioc_comp);
726 bnad->bnad_completions.ioc_comp_status = status;
730 bnad_cb_port_disabled(void *arg, enum bna_cb_status status)
732 struct bnad *bnad = (struct bnad *)arg;
734 complete(&bnad->bnad_completions.port_comp);
736 netif_carrier_off(bnad->netdev);
740 bnad_cb_port_link_status(struct bnad *bnad,
741 enum bna_link_status link_status)
745 link_up = (link_status == BNA_LINK_UP) || (link_status == BNA_CEE_UP);
747 if (link_status == BNA_CEE_UP) {
748 set_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
749 BNAD_UPDATE_CTR(bnad, cee_up);
751 clear_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
754 if (!netif_carrier_ok(bnad->netdev)) {
755 pr_warn("bna: %s link up\n",
757 netif_carrier_on(bnad->netdev);
758 BNAD_UPDATE_CTR(bnad, link_toggle);
759 if (test_bit(BNAD_RF_TX_STARTED, &bnad->run_flags)) {
760 /* Force an immediate Transmit Schedule */
761 pr_info("bna: %s TX_STARTED\n",
763 netif_wake_queue(bnad->netdev);
764 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
766 netif_stop_queue(bnad->netdev);
767 BNAD_UPDATE_CTR(bnad, netif_queue_stop);
771 if (netif_carrier_ok(bnad->netdev)) {
772 pr_warn("bna: %s link down\n",
774 netif_carrier_off(bnad->netdev);
775 BNAD_UPDATE_CTR(bnad, link_toggle);
781 bnad_cb_tx_disabled(void *arg, struct bna_tx *tx,
782 enum bna_cb_status status)
784 struct bnad *bnad = (struct bnad *)arg;
786 complete(&bnad->bnad_completions.tx_comp);
790 bnad_cb_tcb_setup(struct bnad *bnad, struct bna_tcb *tcb)
792 struct bnad_tx_info *tx_info =
793 (struct bnad_tx_info *)tcb->txq->tx->priv;
794 struct bnad_unmap_q *unmap_q = tcb->unmap_q;
796 tx_info->tcb[tcb->id] = tcb;
797 unmap_q->producer_index = 0;
798 unmap_q->consumer_index = 0;
799 unmap_q->q_depth = BNAD_TX_UNMAPQ_DEPTH;
803 bnad_cb_tcb_destroy(struct bnad *bnad, struct bna_tcb *tcb)
805 struct bnad_tx_info *tx_info =
806 (struct bnad_tx_info *)tcb->txq->tx->priv;
808 tx_info->tcb[tcb->id] = NULL;
812 bnad_cb_rcb_setup(struct bnad *bnad, struct bna_rcb *rcb)
814 struct bnad_unmap_q *unmap_q = rcb->unmap_q;
816 unmap_q->producer_index = 0;
817 unmap_q->consumer_index = 0;
818 unmap_q->q_depth = BNAD_RX_UNMAPQ_DEPTH;
822 bnad_cb_ccb_setup(struct bnad *bnad, struct bna_ccb *ccb)
824 struct bnad_rx_info *rx_info =
825 (struct bnad_rx_info *)ccb->cq->rx->priv;
827 rx_info->rx_ctrl[ccb->id].ccb = ccb;
828 ccb->ctrl = &rx_info->rx_ctrl[ccb->id];
832 bnad_cb_ccb_destroy(struct bnad *bnad, struct bna_ccb *ccb)
834 struct bnad_rx_info *rx_info =
835 (struct bnad_rx_info *)ccb->cq->rx->priv;
837 rx_info->rx_ctrl[ccb->id].ccb = NULL;
841 bnad_cb_tx_stall(struct bnad *bnad, struct bna_tcb *tcb)
843 struct bnad_tx_info *tx_info =
844 (struct bnad_tx_info *)tcb->txq->tx->priv;
846 if (tx_info != &bnad->tx_info[0])
849 clear_bit(BNAD_RF_TX_STARTED, &bnad->run_flags);
850 netif_stop_queue(bnad->netdev);
851 pr_info("bna: %s TX_STOPPED\n", bnad->netdev->name);
855 bnad_cb_tx_resume(struct bnad *bnad, struct bna_tcb *tcb)
857 if (test_and_set_bit(BNAD_RF_TX_STARTED, &bnad->run_flags))
860 if (netif_carrier_ok(bnad->netdev)) {
861 pr_info("bna: %s TX_STARTED\n", bnad->netdev->name);
862 netif_wake_queue(bnad->netdev);
863 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
868 bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tcb *tcb)
870 struct bnad_unmap_q *unmap_q;
872 if (!tcb || (!tcb->unmap_q))
875 unmap_q = tcb->unmap_q;
876 if (!unmap_q->unmap_array)
879 if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
882 bnad_free_all_txbufs(bnad, tcb);
884 unmap_q->producer_index = 0;
885 unmap_q->consumer_index = 0;
887 smp_mb__before_clear_bit();
888 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
892 bnad_cb_rx_cleanup(struct bnad *bnad,
895 bnad_cq_cmpl_init(bnad, ccb);
897 bnad_free_rxbufs(bnad, ccb->rcb[0]);
898 clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags);
901 bnad_free_rxbufs(bnad, ccb->rcb[1]);
902 clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[1]->flags);
907 bnad_cb_rx_post(struct bnad *bnad, struct bna_rcb *rcb)
909 struct bnad_unmap_q *unmap_q = rcb->unmap_q;
911 set_bit(BNAD_RXQ_STARTED, &rcb->flags);
913 /* Now allocate & post buffers for this RCB */
914 /* Note: buffer allocation happens in callback context */
915 if (!test_and_set_bit(BNAD_RXQ_REFILL, &rcb->flags)) {
916 if (BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth)
917 >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT)
918 bnad_alloc_n_post_rxbufs(bnad, rcb);
919 smp_mb__before_clear_bit();
920 clear_bit(BNAD_RXQ_REFILL, &rcb->flags);
925 bnad_cb_rx_disabled(void *arg, struct bna_rx *rx,
926 enum bna_cb_status status)
928 struct bnad *bnad = (struct bnad *)arg;
930 complete(&bnad->bnad_completions.rx_comp);
934 bnad_cb_rx_mcast_add(struct bnad *bnad, struct bna_rx *rx,
935 enum bna_cb_status status)
937 bnad->bnad_completions.mcast_comp_status = status;
938 complete(&bnad->bnad_completions.mcast_comp);
942 bnad_cb_stats_get(struct bnad *bnad, enum bna_cb_status status,
943 struct bna_stats *stats)
945 if (status == BNA_CB_SUCCESS)
946 BNAD_UPDATE_CTR(bnad, hw_stats_updates);
948 if (!netif_running(bnad->netdev) ||
949 !test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
952 mod_timer(&bnad->stats_timer,
953 jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
957 bnad_cb_stats_clr(struct bnad *bnad)
961 /* Resource allocation, free functions */
964 bnad_mem_free(struct bnad *bnad,
965 struct bna_mem_info *mem_info)
970 if (mem_info->mdl == NULL)
973 for (i = 0; i < mem_info->num; i++) {
974 if (mem_info->mdl[i].kva != NULL) {
975 if (mem_info->mem_type == BNA_MEM_T_DMA) {
976 BNA_GET_DMA_ADDR(&(mem_info->mdl[i].dma),
978 pci_free_consistent(bnad->pcidev,
979 mem_info->mdl[i].len,
980 mem_info->mdl[i].kva, dma_pa);
982 kfree(mem_info->mdl[i].kva);
985 kfree(mem_info->mdl);
986 mem_info->mdl = NULL;
990 bnad_mem_alloc(struct bnad *bnad,
991 struct bna_mem_info *mem_info)
996 if ((mem_info->num == 0) || (mem_info->len == 0)) {
997 mem_info->mdl = NULL;
1001 mem_info->mdl = kcalloc(mem_info->num, sizeof(struct bna_mem_descr),
1003 if (mem_info->mdl == NULL)
1006 if (mem_info->mem_type == BNA_MEM_T_DMA) {
1007 for (i = 0; i < mem_info->num; i++) {
1008 mem_info->mdl[i].len = mem_info->len;
1009 mem_info->mdl[i].kva =
1010 pci_alloc_consistent(bnad->pcidev,
1011 mem_info->len, &dma_pa);
1013 if (mem_info->mdl[i].kva == NULL)
1016 BNA_SET_DMA_ADDR(dma_pa,
1017 &(mem_info->mdl[i].dma));
1020 for (i = 0; i < mem_info->num; i++) {
1021 mem_info->mdl[i].len = mem_info->len;
1022 mem_info->mdl[i].kva = kzalloc(mem_info->len,
1024 if (mem_info->mdl[i].kva == NULL)
1032 bnad_mem_free(bnad, mem_info);
1036 /* Free IRQ for Mailbox */
1038 bnad_mbox_irq_free(struct bnad *bnad,
1039 struct bna_intr_info *intr_info)
1042 unsigned long flags;
1044 if (intr_info->idl == NULL)
1047 spin_lock_irqsave(&bnad->bna_lock, flags);
1049 bnad_disable_mbox_irq(bnad);
1051 irq = BNAD_GET_MBOX_IRQ(bnad);
1052 free_irq(irq, bnad->netdev);
1054 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1056 kfree(intr_info->idl);
1060 * Allocates IRQ for Mailbox, but keep it disabled
1061 * This will be enabled once we get the mbox enable callback
1065 bnad_mbox_irq_alloc(struct bnad *bnad,
1066 struct bna_intr_info *intr_info)
1069 unsigned long irq_flags, flags;
1071 irq_handler_t irq_handler;
1073 /* Mbox should use only 1 vector */
1075 intr_info->idl = kzalloc(sizeof(*(intr_info->idl)), GFP_KERNEL);
1076 if (!intr_info->idl)
1079 spin_lock_irqsave(&bnad->bna_lock, flags);
1080 if (bnad->cfg_flags & BNAD_CF_MSIX) {
1081 irq_handler = (irq_handler_t)bnad_msix_mbox_handler;
1082 irq = bnad->msix_table[bnad->msix_num - 1].vector;
1083 irq_flags = 0; /* keep request_irq() flags separate from spinlock flags */
1084 intr_info->intr_type = BNA_INTR_T_MSIX;
1085 intr_info->idl[0].vector = bnad->msix_num - 1;
1087 irq_handler = (irq_handler_t)bnad_isr;
1088 irq = bnad->pcidev->irq;
1089 irq_flags = IRQF_SHARED;
1090 intr_info->intr_type = BNA_INTR_T_INTX;
1091 /* intr_info->idl.vector = 0 ? */
1093 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1095 sprintf(bnad->mbox_irq_name, "%s", BNAD_NAME);
1097 err = request_irq(irq, irq_handler, irq_flags,
1098 bnad->mbox_irq_name, bnad->netdev);
1100 kfree(intr_info->idl);
1101 intr_info->idl = NULL;
1105 spin_lock_irqsave(&bnad->bna_lock, flags);
1106 bnad_disable_mbox_irq(bnad);
1107 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1112 bnad_txrx_irq_free(struct bnad *bnad, struct bna_intr_info *intr_info)
1114 kfree(intr_info->idl);
1115 intr_info->idl = NULL;
1118 /* Allocates Interrupt Descriptor List for MSIX/INT-X vectors */
1120 bnad_txrx_irq_alloc(struct bnad *bnad, enum bnad_intr_source src,
1121 uint txrx_id, struct bna_intr_info *intr_info)
1123 int i, vector_start = 0;
1125 unsigned long flags;
1127 spin_lock_irqsave(&bnad->bna_lock, flags);
1128 cfg_flags = bnad->cfg_flags;
1129 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1131 if (cfg_flags & BNAD_CF_MSIX) {
1132 intr_info->intr_type = BNA_INTR_T_MSIX;
1133 intr_info->idl = kcalloc(intr_info->num,
1134 sizeof(struct bna_intr_descr),
1136 if (!intr_info->idl)
1141 vector_start = txrx_id;
1145 vector_start = bnad->num_tx * bnad->num_txq_per_tx +
1153 for (i = 0; i < intr_info->num; i++)
1154 intr_info->idl[i].vector = vector_start + i;
1156 intr_info->intr_type = BNA_INTR_T_INTX;
1158 intr_info->idl = kcalloc(intr_info->num,
1159 sizeof(struct bna_intr_descr),
1161 if (!intr_info->idl)
1166 intr_info->idl[0].vector = 0x1; /* Bit mask : Tx IB */
1170 intr_info->idl[0].vector = 0x2; /* Bit mask : Rx IB */
1178 * NOTE: Should be called for MSIX only
1179 * Unregisters Tx MSIX vector(s) from the kernel
1182 bnad_tx_msix_unregister(struct bnad *bnad, struct bnad_tx_info *tx_info,
1188 for (i = 0; i < num_txqs; i++) {
1189 if (tx_info->tcb[i] == NULL)
1192 vector_num = tx_info->tcb[i]->intr_vector;
1193 free_irq(bnad->msix_table[vector_num].vector, tx_info->tcb[i]);
1198 * NOTE: Should be called for MSIX only
1199 * Registers Tx MSIX vector(s) and ISR(s), cookie with the kernel
1202 bnad_tx_msix_register(struct bnad *bnad, struct bnad_tx_info *tx_info,
1203 uint tx_id, int num_txqs)
1209 for (i = 0; i < num_txqs; i++) {
1210 vector_num = tx_info->tcb[i]->intr_vector;
1211 sprintf(tx_info->tcb[i]->name, "%s TXQ %d", bnad->netdev->name,
1212 tx_id + tx_info->tcb[i]->id);
1213 err = request_irq(bnad->msix_table[vector_num].vector,
1214 (irq_handler_t)bnad_msix_tx, 0,
1215 tx_info->tcb[i]->name,
1225 bnad_tx_msix_unregister(bnad, tx_info, (i - 1));
1230 * NOTE: Should be called for MSIX only
1231 * Unregisters Rx MSIX vector(s) from the kernel
1234 bnad_rx_msix_unregister(struct bnad *bnad, struct bnad_rx_info *rx_info,
1240 for (i = 0; i < num_rxps; i++) {
1241 if (rx_info->rx_ctrl[i].ccb == NULL)
1244 vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
1245 free_irq(bnad->msix_table[vector_num].vector,
1246 rx_info->rx_ctrl[i].ccb);
1251 * NOTE: Should be called for MSIX only
1252 * Registers Rx MSIX vector(s) and ISR(s), cookie with the kernel
1255 bnad_rx_msix_register(struct bnad *bnad, struct bnad_rx_info *rx_info,
1256 uint rx_id, int num_rxps)
1262 for (i = 0; i < num_rxps; i++) {
1263 vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
1264 sprintf(rx_info->rx_ctrl[i].ccb->name, "%s CQ %d",
1266 rx_id + rx_info->rx_ctrl[i].ccb->id);
1267 err = request_irq(bnad->msix_table[vector_num].vector,
1268 (irq_handler_t)bnad_msix_rx, 0,
1269 rx_info->rx_ctrl[i].ccb->name,
1270 rx_info->rx_ctrl[i].ccb);
1279 bnad_rx_msix_unregister(bnad, rx_info, (i - 1));
1283 /* Free Tx object Resources */
1285 bnad_tx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
1289 for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
1290 if (res_info[i].res_type == BNA_RES_T_MEM)
1291 bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
1292 else if (res_info[i].res_type == BNA_RES_T_INTR)
1293 bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
1297 /* Allocates memory and interrupt resources for Tx object */
1299 bnad_tx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
1304 for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
1305 if (res_info[i].res_type == BNA_RES_T_MEM)
1306 err = bnad_mem_alloc(bnad,
1307 &res_info[i].res_u.mem_info);
1308 else if (res_info[i].res_type == BNA_RES_T_INTR)
1309 err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_TX, tx_id,
1310 &res_info[i].res_u.intr_info);
1317 bnad_tx_res_free(bnad, res_info);
1321 /* Free Rx object Resources */
1323 bnad_rx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
1327 for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
1328 if (res_info[i].res_type == BNA_RES_T_MEM)
1329 bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
1330 else if (res_info[i].res_type == BNA_RES_T_INTR)
1331 bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
1335 /* Allocates memory and interrupt resources for Rx object */
1337 bnad_rx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
1342 /* All memory needs to be allocated before setup_ccbs */
1343 for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
1344 if (res_info[i].res_type == BNA_RES_T_MEM)
1345 err = bnad_mem_alloc(bnad,
1346 &res_info[i].res_u.mem_info);
1347 else if (res_info[i].res_type == BNA_RES_T_INTR)
1348 err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_RX, rx_id,
1349 &res_info[i].res_u.intr_info);
1356 bnad_rx_res_free(bnad, res_info);
1360 /* Timer callbacks */
1363 bnad_ioc_timeout(unsigned long data)
1365 struct bnad *bnad = (struct bnad *)data;
1366 unsigned long flags;
1368 spin_lock_irqsave(&bnad->bna_lock, flags);
1369 bfa_nw_ioc_timeout((void *) &bnad->bna.device.ioc);
1370 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1374 bnad_ioc_hb_check(unsigned long data)
1376 struct bnad *bnad = (struct bnad *)data;
1377 unsigned long flags;
1379 spin_lock_irqsave(&bnad->bna_lock, flags);
1380 bfa_nw_ioc_hb_check((void *) &bnad->bna.device.ioc);
1381 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1385 bnad_ioc_sem_timeout(unsigned long data)
1387 struct bnad *bnad = (struct bnad *)data;
1388 unsigned long flags;
1390 spin_lock_irqsave(&bnad->bna_lock, flags);
1391 bfa_nw_ioc_sem_timeout((void *) &bnad->bna.device.ioc);
1392 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1396 * All timer routines use bnad->bna_lock to protect against the
1397 * following race, which may occur in case of no locking: a timer
1398 * callback on one CPU tests a *_TIMER_RUNNING bit while another CPU
1399 * clears that bit and calls del_timer_sync(), after which the
1400 * callback re-arms the timer with mod_timer().
1401 */
1405 /* b) Dynamic Interrupt Moderation Timer */
1407 bnad_dim_timeout(unsigned long data)
1409 struct bnad *bnad = (struct bnad *)data;
1410 struct bnad_rx_info *rx_info;
1411 struct bnad_rx_ctrl *rx_ctrl;
1413 unsigned long flags;
1415 if (!netif_carrier_ok(bnad->netdev))
1418 spin_lock_irqsave(&bnad->bna_lock, flags);
1419 for (i = 0; i < bnad->num_rx; i++) {
1420 rx_info = &bnad->rx_info[i];
1423 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
1424 rx_ctrl = &rx_info->rx_ctrl[j];
1427 bna_rx_dim_update(rx_ctrl->ccb);
1431 /* Checking for BNAD_CF_DIM_ENABLED here does not eliminate the race */
1432 if (test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags))
1433 mod_timer(&bnad->dim_timer,
1434 jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
1435 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1438 /* c) Statistics Timer */
1440 bnad_stats_timeout(unsigned long data)
1442 struct bnad *bnad = (struct bnad *)data;
1443 unsigned long flags;
1445 if (!netif_running(bnad->netdev) ||
1446 !test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1449 spin_lock_irqsave(&bnad->bna_lock, flags);
1450 bna_stats_get(&bnad->bna);
1451 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1455 * Set up timer for DIM
1456 * Called with bnad->bna_lock held
1459 bnad_dim_timer_start(struct bnad *bnad)
1461 if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
1462 !test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
1463 setup_timer(&bnad->dim_timer, bnad_dim_timeout,
1464 (unsigned long)bnad);
1465 set_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
1466 mod_timer(&bnad->dim_timer,
1467 jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
1472 * Set up timer for statistics
1473 * Called with mutex_lock(&bnad->conf_mutex) held
1476 bnad_stats_timer_start(struct bnad *bnad)
1478 unsigned long flags;
1480 spin_lock_irqsave(&bnad->bna_lock, flags);
1481 if (!test_and_set_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags)) {
1482 setup_timer(&bnad->stats_timer, bnad_stats_timeout,
1483 (unsigned long)bnad);
1484 mod_timer(&bnad->stats_timer,
1485 jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
1487 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1492 * Stops the stats timer
1493 * Called with mutex_lock(&bnad->conf_mutex) held
1496 bnad_stats_timer_stop(struct bnad *bnad)
1499 unsigned long flags;
1501 spin_lock_irqsave(&bnad->bna_lock, flags);
1502 if (test_and_clear_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1504 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1506 del_timer_sync(&bnad->stats_timer);
1512 bnad_netdev_mc_list_get(struct net_device *netdev, u8 *mc_list)
1514 int i = 1; /* Index 0 has broadcast address */
1515 struct netdev_hw_addr *mc_addr;
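/*
 * Index 0 is reserved for the broadcast address; the caller is
 * expected to have filled it in (see bnad_bcast_addr above).
 */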
1517 netdev_for_each_mc_addr(mc_addr, netdev) {
1518 memcpy(&mc_list[i * ETH_ALEN], &mc_addr->addr[0],
1525 bnad_napi_poll_rx(struct napi_struct *napi, int budget)
1527 struct bnad_rx_ctrl *rx_ctrl =
1528 container_of(napi, struct bnad_rx_ctrl, napi);
1529 struct bna_ccb *ccb;
1537 if (!netif_carrier_ok(bnad->netdev))
1540 rcvd = bnad_poll_cq(bnad, ccb, budget);
1545 napi_complete((napi));
1547 BNAD_UPDATE_CTR(bnad, netif_rx_complete);
1549 bnad_enable_rx_irq(bnad, ccb);
1554 bnad_napi_poll_txrx(struct napi_struct *napi, int budget)
1556 struct bnad_rx_ctrl *rx_ctrl =
1557 container_of(napi, struct bnad_rx_ctrl, napi);
1558 struct bna_ccb *ccb;
1567 if (!netif_carrier_ok(bnad->netdev))
1570 /* Handle Tx Completions, if any */
1571 for (i = 0; i < bnad->num_tx; i++) {
1572 for (j = 0; j < bnad->num_txq_per_tx; j++)
1573 bnad_tx(bnad, bnad->tx_info[i].tcb[j]);
1576 /* Handle Rx Completions */
1577 rcvd = bnad_poll_cq(bnad, ccb, budget);
1581 napi_complete((napi));
1583 BNAD_UPDATE_CTR(bnad, netif_rx_complete);
1585 bnad_enable_txrx_irqs(bnad);
1590 bnad_napi_enable(struct bnad *bnad, u32 rx_id)
1592 int (*napi_poll) (struct napi_struct *, int);
1593 struct bnad_rx_ctrl *rx_ctrl;
1595 unsigned long flags;
1597 spin_lock_irqsave(&bnad->bna_lock, flags);
1598 if (bnad->cfg_flags & BNAD_CF_MSIX)
1599 napi_poll = bnad_napi_poll_rx;
1601 napi_poll = bnad_napi_poll_txrx;
1602 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1604 /* Initialize & enable NAPI */
1605 for (i = 0; i < bnad->num_rxp_per_rx; i++) {
1606 rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i];
1607 netif_napi_add(bnad->netdev, &rx_ctrl->napi,
1609 napi_enable(&rx_ctrl->napi);
1614 bnad_napi_disable(struct bnad *bnad, u32 rx_id)
1618 /* First disable and then clean up */
1619 for (i = 0; i < bnad->num_rxp_per_rx; i++) {
1620 napi_disable(&bnad->rx_info[rx_id].rx_ctrl[i].napi);
1621 netif_napi_del(&bnad->rx_info[rx_id].rx_ctrl[i].napi);
1625 /* Should be called with conf_lock held */
1627 bnad_cleanup_tx(struct bnad *bnad, uint tx_id)
1629 struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
1630 struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
1631 unsigned long flags;
1636 init_completion(&bnad->bnad_completions.tx_comp);
1637 spin_lock_irqsave(&bnad->bna_lock, flags);
1638 bna_tx_disable(tx_info->tx, BNA_HARD_CLEANUP, bnad_cb_tx_disabled);
1639 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1640 wait_for_completion(&bnad->bnad_completions.tx_comp);
1642 if (tx_info->tcb[0]->intr_type == BNA_INTR_T_MSIX)
1643 bnad_tx_msix_unregister(bnad, tx_info,
1644 bnad->num_txq_per_tx);
1646 spin_lock_irqsave(&bnad->bna_lock, flags);
1647 bna_tx_destroy(tx_info->tx);
1648 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1653 tasklet_kill(&bnad->tx_free_tasklet);
1655 bnad_tx_res_free(bnad, res_info);
1658 /* Should be called with conf_lock held */
1660 bnad_setup_tx(struct bnad *bnad, uint tx_id)
1663 struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
1664 struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
1665 struct bna_intr_info *intr_info =
1666 &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
1667 struct bna_tx_config *tx_config = &bnad->tx_config[tx_id];
1668 struct bna_tx_event_cbfn tx_cbfn;
1670 unsigned long flags;
1672 /* Initialize the Tx object configuration */
1673 tx_config->num_txq = bnad->num_txq_per_tx;
1674 tx_config->txq_depth = bnad->txq_depth;
1675 tx_config->tx_type = BNA_TX_T_REGULAR;
1677 /* Initialize the tx event handlers */
1678 tx_cbfn.tcb_setup_cbfn = bnad_cb_tcb_setup;
1679 tx_cbfn.tcb_destroy_cbfn = bnad_cb_tcb_destroy;
1680 tx_cbfn.tx_stall_cbfn = bnad_cb_tx_stall;
1681 tx_cbfn.tx_resume_cbfn = bnad_cb_tx_resume;
1682 tx_cbfn.tx_cleanup_cbfn = bnad_cb_tx_cleanup;
1684 /* Get BNA's resource requirement for one tx object */
1685 spin_lock_irqsave(&bnad->bna_lock, flags);
1686 bna_tx_res_req(bnad->num_txq_per_tx,
1687 bnad->txq_depth, res_info);
1688 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1690 /* Fill Unmap Q memory requirements */
1691 BNAD_FILL_UNMAPQ_MEM_REQ(
1692 &res_info[BNA_TX_RES_MEM_T_UNMAPQ],
1693 bnad->num_txq_per_tx,
1694 BNAD_TX_UNMAPQ_DEPTH);
1696 /* Allocate resources */
1697 err = bnad_tx_res_alloc(bnad, res_info, tx_id);
1701 /* Ask BNA to create one Tx object, supplying required resources */
1702 spin_lock_irqsave(&bnad->bna_lock, flags);
1703 tx = bna_tx_create(&bnad->bna, bnad, tx_config, &tx_cbfn, res_info,
1705 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1710 /* Register ISR for the Tx object */
1711 if (intr_info->intr_type == BNA_INTR_T_MSIX) {
1712 err = bnad_tx_msix_register(bnad, tx_info,
1713 tx_id, bnad->num_txq_per_tx);
1718 spin_lock_irqsave(&bnad->bna_lock, flags);
1720 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1725 bnad_tx_res_free(bnad, res_info);
1729 /* Setup the rx config for bna_rx_create */
1730 /* bnad decides the configuration */
1732 bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config)
1734 rx_config->rx_type = BNA_RX_T_REGULAR;
1735 rx_config->num_paths = bnad->num_rxp_per_rx;
1737 if (bnad->num_rxp_per_rx > 1) {
1738 rx_config->rss_status = BNA_STATUS_T_ENABLED;
1739 rx_config->rss_config.hash_type =
1744 rx_config->rss_config.hash_mask =
1745 bnad->num_rxp_per_rx - 1;
1746 get_random_bytes(rx_config->rss_config.toeplitz_hash_key,
1747 sizeof(rx_config->rss_config.toeplitz_hash_key));
1749 rx_config->rss_status = BNA_STATUS_T_DISABLED;
1750 memset(&rx_config->rss_config, 0,
1751 sizeof(rx_config->rss_config));
1753 rx_config->rxp_type = BNA_RXP_SLR;
1754 rx_config->q_depth = bnad->rxq_depth;
1756 rx_config->small_buff_size = BFI_SMALL_RXBUF_SIZE;
1758 rx_config->vlan_strip_status = BNA_STATUS_T_ENABLED;
1761 /* Called with mutex_lock(&bnad->conf_mutex) held */
1763 bnad_cleanup_rx(struct bnad *bnad, uint rx_id)
1765 struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
1766 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
1767 struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
1768 unsigned long flags;
1769 int dim_timer_del = 0;
1775 spin_lock_irqsave(&bnad->bna_lock, flags);
1776 dim_timer_del = bnad_dim_timer_running(bnad);
1778 clear_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
1779 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1781 del_timer_sync(&bnad->dim_timer);
1784 bnad_napi_disable(bnad, rx_id);
1786 init_completion(&bnad->bnad_completions.rx_comp);
1787 spin_lock_irqsave(&bnad->bna_lock, flags);
1788 bna_rx_disable(rx_info->rx, BNA_HARD_CLEANUP, bnad_cb_rx_disabled);
1789 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1790 wait_for_completion(&bnad->bnad_completions.rx_comp);
1792 if (rx_info->rx_ctrl[0].ccb->intr_type == BNA_INTR_T_MSIX)
1793 bnad_rx_msix_unregister(bnad, rx_info, rx_config->num_paths);
1795 spin_lock_irqsave(&bnad->bna_lock, flags);
1796 bna_rx_destroy(rx_info->rx);
1797 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1801 bnad_rx_res_free(bnad, res_info);
1804 /* Called with mutex_lock(&bnad->conf_mutex) held */
1806 bnad_setup_rx(struct bnad *bnad, uint rx_id)
1809 struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
1810 struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
1811 struct bna_intr_info *intr_info =
1812 &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
1813 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
1814 struct bna_rx_event_cbfn rx_cbfn;
1816 unsigned long flags;
1818 /* Initialize the Rx object configuration */
1819 bnad_init_rx_config(bnad, rx_config);
1821 /* Initialize the Rx event handlers */
1822 rx_cbfn.rcb_setup_cbfn = bnad_cb_rcb_setup;
1823 rx_cbfn.rcb_destroy_cbfn = NULL;
1824 rx_cbfn.ccb_setup_cbfn = bnad_cb_ccb_setup;
1825 rx_cbfn.ccb_destroy_cbfn = bnad_cb_ccb_destroy;
1826 rx_cbfn.rx_cleanup_cbfn = bnad_cb_rx_cleanup;
1827 rx_cbfn.rx_post_cbfn = bnad_cb_rx_post;
1829 /* Get BNA's resource requirement for one Rx object */
1830 spin_lock_irqsave(&bnad->bna_lock, flags);
1831 bna_rx_res_req(rx_config, res_info);
1832 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1834 /* Fill Unmap Q memory requirements */
1835 BNAD_FILL_UNMAPQ_MEM_REQ(
1836 &res_info[BNA_RX_RES_MEM_T_UNMAPQ],
1837 rx_config->num_paths +
1838 ((rx_config->rxp_type == BNA_RXP_SINGLE) ? 0 :
1839 rx_config->num_paths), BNAD_RX_UNMAPQ_DEPTH);
1841 /* Allocate resource */
1842 err = bnad_rx_res_alloc(bnad, res_info, rx_id);
1846 /* Ask BNA to create one Rx object, supplying required resources */
1847 spin_lock_irqsave(&bnad->bna_lock, flags);
1848 rx = bna_rx_create(&bnad->bna, bnad, rx_config, &rx_cbfn, res_info,
1850 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1855 /* Register ISR for the Rx object */
1856 if (intr_info->intr_type == BNA_INTR_T_MSIX) {
1857 err = bnad_rx_msix_register(bnad, rx_info, rx_id,
1858 rx_config->num_paths);
1864 bnad_napi_enable(bnad, rx_id);
1866 spin_lock_irqsave(&bnad->bna_lock, flags);
1868 /* Set up Dynamic Interrupt Moderation Vector */
1869 if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED)
1870 bna_rx_dim_reconfig(&bnad->bna, bna_napi_dim_vector);
1872 /* Enable VLAN filtering only on the default Rx */
1873 bna_rx_vlanfilter_enable(rx);
1875 /* Start the DIM timer */
1876 bnad_dim_timer_start(bnad);
1880 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1885 bnad_cleanup_rx(bnad, rx_id);
1889 /* Called with conf_lock & bnad->bna_lock held */
1891 bnad_tx_coalescing_timeo_set(struct bnad *bnad)
1893 struct bnad_tx_info *tx_info;
1895 tx_info = &bnad->tx_info[0];
1899 bna_tx_coalescing_timeo_set(tx_info->tx, bnad->tx_coalescing_timeo);
1902 /* Called with conf_lock & bnad->bna_lock held */
1904 bnad_rx_coalescing_timeo_set(struct bnad *bnad)
1906 struct bnad_rx_info *rx_info;
1909 for (i = 0; i < bnad->num_rx; i++) {
1910 rx_info = &bnad->rx_info[i];
1913 bna_rx_coalescing_timeo_set(rx_info->rx,
1914 bnad->rx_coalescing_timeo);
1919 * Called with bnad->bna_lock held
1922 bnad_mac_addr_set_locked(struct bnad *bnad, u8 *mac_addr)
1926 if (!is_valid_ether_addr(mac_addr))
1927 return -EADDRNOTAVAIL;
1929 /* If datapath is down, pretend everything went through */
1930 if (!bnad->rx_info[0].rx)
1933 ret = bna_rx_ucast_set(bnad->rx_info[0].rx, mac_addr, NULL);
1934 if (ret != BNA_CB_SUCCESS)
1935 return -EADDRNOTAVAIL;
1940 /* Should be called with conf_lock held */
1942 bnad_enable_default_bcast(struct bnad *bnad)
1944 struct bnad_rx_info *rx_info = &bnad->rx_info[0];
1946 unsigned long flags;
1948 init_completion(&bnad->bnad_completions.mcast_comp);
1950 spin_lock_irqsave(&bnad->bna_lock, flags);
1951 ret = bna_rx_mcast_add(rx_info->rx, (u8 *)bnad_bcast_addr,
1952 bnad_cb_rx_mcast_add);
1953 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1955 if (ret == BNA_CB_SUCCESS)
1956 wait_for_completion(&bnad->bnad_completions.mcast_comp);
1960 if (bnad->bnad_completions.mcast_comp_status != BNA_CB_SUCCESS)
1966 /* Statistics utilities */
1968 bnad_netdev_qstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
1972 for (i = 0; i < bnad->num_rx; i++) {
1973 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
1974 if (bnad->rx_info[i].rx_ctrl[j].ccb) {
1975 stats->rx_packets += bnad->rx_info[i].
1976 rx_ctrl[j].ccb->rcb[0]->rxq->rx_packets;
1977 stats->rx_bytes += bnad->rx_info[i].
1978 rx_ctrl[j].ccb->rcb[0]->rxq->rx_bytes;
1979 if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] &&
1980 bnad->rx_info[i].rx_ctrl[j].ccb->
1982 stats->rx_packets +=
1983 bnad->rx_info[i].rx_ctrl[j].
1984 ccb->rcb[1]->rxq->rx_packets;
1986 bnad->rx_info[i].rx_ctrl[j].
1987 ccb->rcb[1]->rxq->rx_bytes;
1992 for (i = 0; i < bnad->num_tx; i++) {
1993 for (j = 0; j < bnad->num_txq_per_tx; j++) {
1994 if (bnad->tx_info[i].tcb[j]) {
1995 stats->tx_packets +=
1996 bnad->tx_info[i].tcb[j]->txq->tx_packets;
1998 bnad->tx_info[i].tcb[j]->txq->tx_bytes;
2005 * Must be called with the bna_lock held.
2008 bnad_netdev_hwstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
2010 struct bfi_ll_stats_mac *mac_stats;
2014 mac_stats = &bnad->stats.bna_stats->hw_stats->mac_stats;
2016 mac_stats->rx_fcs_error + mac_stats->rx_alignment_error +
2017 mac_stats->rx_frame_length_error + mac_stats->rx_code_error +
2018 mac_stats->rx_undersize;
2019 stats->tx_errors = mac_stats->tx_fcs_error +
2020 mac_stats->tx_undersize;
2021 stats->rx_dropped = mac_stats->rx_drop;
2022 stats->tx_dropped = mac_stats->tx_drop;
2023 stats->multicast = mac_stats->rx_multicast;
2024 stats->collisions = mac_stats->tx_total_collision;
2026 stats->rx_length_errors = mac_stats->rx_frame_length_error;
2028 /* receive ring buffer overflow ?? */
2030 stats->rx_crc_errors = mac_stats->rx_fcs_error;
2031 stats->rx_frame_errors = mac_stats->rx_alignment_error;
2032 /* recv'r fifo overrun */
2033 bmap = (u64)bnad->stats.bna_stats->rxf_bmap[0] |
2034 ((u64)bnad->stats.bna_stats->rxf_bmap[1] << 32);
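/*
 * Walk the 64-bit RxF bitmap assembled above; each set bit selects an
 * active RxF whose frame_drops counter feeds rx_fifo_errors.
 */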
2035 for (i = 0; bmap && (i < BFI_LL_RXF_ID_MAX); i++) {
2037 stats->rx_fifo_errors +=
2038 bnad->stats.bna_stats->
2039 hw_stats->rxf_stats[i].frame_drops;
2047 bnad_mbox_irq_sync(struct bnad *bnad)
2050 unsigned long flags;
2052 spin_lock_irqsave(&bnad->bna_lock, flags);
2053 if (bnad->cfg_flags & BNAD_CF_MSIX)
2054 irq = bnad->msix_table[bnad->msix_num - 1].vector;
2056 irq = bnad->pcidev->irq;
2057 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2059 synchronize_irq(irq);
2062 /* Utility used by bnad_start_xmit, for doing TSO */
2064 bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb)
2068 /* SKB_GSO_TCPV4 and SKB_GSO_TCPV6 are defined since 2.6.18. */
2069 BUG_ON(!(skb_shinfo(skb)->gso_type == SKB_GSO_TCPV4 ||
2070 skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6));
2071 if (skb_header_cloned(skb)) {
2072 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2074 BNAD_UPDATE_CTR(bnad, tso_err);
2080 * For TSO, the TCP checksum field is seeded with the pseudo-header
2081 * checksum, excluding the length field.
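 * The hardware fills in the per-segment length and finalizes the
 * checksum for each segment it generates (standard TSO behaviour).
 */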
2083 if (skb->protocol == htons(ETH_P_IP)) {
2084 struct iphdr *iph = ip_hdr(skb);
2086 /* Do we really need these? */
2090 tcp_hdr(skb)->check =
2091 ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
2093 BNAD_UPDATE_CTR(bnad, tso4);
2095 struct ipv6hdr *ipv6h = ipv6_hdr(skb);
2097 BUG_ON(!(skb->protocol == htons(ETH_P_IPV6)));
2098 ipv6h->payload_len = 0;
2099 tcp_hdr(skb)->check =
2100 ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, 0,
2102 BNAD_UPDATE_CTR(bnad, tso6);
2109 * Initialize Q numbers depending on Rx Paths
2110 * Called with bnad->bna_lock held, because of cfg_flags access.
2111 */
2114 bnad_q_num_init(struct bnad *bnad)
2118 rxps = min((uint)num_online_cpus(),
2119 (uint)(BNAD_MAX_RXS * BNAD_MAX_RXPS_PER_RX));
2121 if (!(bnad->cfg_flags & BNAD_CF_MSIX))
2122 rxps = 1; /* INTx */
2126 bnad->num_rxp_per_rx = rxps;
2127 bnad->num_txq_per_tx = BNAD_TXQ_NUM;
2131 * Adjusts the Q numbers, given the number of MSIX vectors.
2132 * Gives preference to RSS over Tx priority queues;
2133 * in that case, just one Tx Q is used.
2134 * Called with bnad->bna_lock held because of cfg_flags access.
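 * Illustrative example (made-up numbers): with num_tx = 1, one TxQ
 * per Tx and 8 granted vectors, num_rxp_per_rx becomes
 * 8 - 1 - BNAD_MAILBOX_MSIX_VECTORS.
 */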
2137 bnad_q_num_adjust(struct bnad *bnad, int msix_vectors)
2139 bnad->num_txq_per_tx = 1;
2140 if ((msix_vectors >= (bnad->num_tx * bnad->num_txq_per_tx) +
2141 bnad_rxqs_per_cq + BNAD_MAILBOX_MSIX_VECTORS) &&
2142 (bnad->cfg_flags & BNAD_CF_MSIX)) {
2143 bnad->num_rxp_per_rx = msix_vectors -
2144 (bnad->num_tx * bnad->num_txq_per_tx) -
2145 BNAD_MAILBOX_MSIX_VECTORS;
2147 bnad->num_rxp_per_rx = 1;
2151 bnad_set_netdev_perm_addr(struct bnad *bnad)
2153 struct net_device *netdev = bnad->netdev;
2155 memcpy(netdev->perm_addr, &bnad->perm_addr, netdev->addr_len);
2156 if (is_zero_ether_addr(netdev->dev_addr))
2157 memcpy(netdev->dev_addr, &bnad->perm_addr, netdev->addr_len);
2160 /* Enable / disable device */
2162 bnad_device_disable(struct bnad *bnad)
2164 unsigned long flags;
2166 init_completion(&bnad->bnad_completions.ioc_comp);
2168 spin_lock_irqsave(&bnad->bna_lock, flags);
2169 bna_device_disable(&bnad->bna.device, BNA_HARD_CLEANUP);
2170 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2172 wait_for_completion(&bnad->bnad_completions.ioc_comp);
2177 bnad_device_enable(struct bnad *bnad)
2180 unsigned long flags;
2182 init_completion(&bnad->bnad_completions.ioc_comp);
2184 spin_lock_irqsave(&bnad->bna_lock, flags);
2185 bna_device_enable(&bnad->bna.device);
2186 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2188 wait_for_completion(&bnad->bnad_completions.ioc_comp);
2190 if (bnad->bnad_completions.ioc_comp_status)
2191 err = bnad->bnad_completions.ioc_comp_status;
2196 /* Free BNA resources */
2198 bnad_res_free(struct bnad *bnad)
2201 struct bna_res_info *res_info = &bnad->res_info[0];
2203 for (i = 0; i < BNA_RES_T_MAX; i++) {
2204 if (res_info[i].res_type == BNA_RES_T_MEM)
2205 bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
2207 bnad_mbox_irq_free(bnad, &res_info[i].res_u.intr_info);
2211 /* Allocates memory and interrupt resources for BNA */
2213 bnad_res_alloc(struct bnad *bnad)
2216 struct bna_res_info *res_info = &bnad->res_info[0];
2218 for (i = 0; i < BNA_RES_T_MAX; i++) {
2219 if (res_info[i].res_type == BNA_RES_T_MEM)
2220 err = bnad_mem_alloc(bnad, &res_info[i].res_u.mem_info);
2222 err = bnad_mbox_irq_alloc(bnad,
2223 &res_info[i].res_u.intr_info);
2230 bnad_res_free(bnad);
2234 /* Interrupt enable / disable */
2236 bnad_enable_msix(struct bnad *bnad)
2240 unsigned long flags;
2242 spin_lock_irqsave(&bnad->bna_lock, flags);
2243 if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
2244 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2247 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2249 if (bnad->msix_table)
2252 tot_msix_num = bnad->msix_num + bnad->msix_diag_num;
2255 kcalloc(tot_msix_num, sizeof(struct msix_entry), GFP_KERNEL);
2257 if (!bnad->msix_table)
2260 for (i = 0; i < tot_msix_num; i++)
2261 bnad->msix_table[i].entry = i;
2263 ret = pci_enable_msix(bnad->pcidev, bnad->msix_table, tot_msix_num);
2265 /* Not enough MSI-X vectors. */
2267 spin_lock_irqsave(&bnad->bna_lock, flags);
2268 /* ret = #of vectors that we got */
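/*
 * With this legacy pci_enable_msix() API a positive return value is
 * the number of vectors the platform could actually allocate, so the
 * queue counts are sized down to fit and the call is retried.
 */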
2269 bnad_q_num_adjust(bnad, ret);
2270 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2272 bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx)
2274 * bnad->num_rxp_per_rx) +
2275 BNAD_MAILBOX_MSIX_VECTORS;
2276 tot_msix_num = bnad->msix_num + bnad->msix_diag_num;
2278 /* Try once more with adjusted numbers */
2279 /* If this fails, fall back to INTx */
2280 ret = pci_enable_msix(bnad->pcidev, bnad->msix_table,
2291 kfree(bnad->msix_table);
2292 bnad->msix_table = NULL;
2294 bnad->msix_diag_num = 0;
2295 spin_lock_irqsave(&bnad->bna_lock, flags);
2296 bnad->cfg_flags &= ~BNAD_CF_MSIX;
2297 bnad_q_num_init(bnad);
2298 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2302 bnad_disable_msix(struct bnad *bnad)
2305 unsigned long flags;
2307 spin_lock_irqsave(&bnad->bna_lock, flags);
2308 cfg_flags = bnad->cfg_flags;
2309 if (bnad->cfg_flags & BNAD_CF_MSIX)
2310 bnad->cfg_flags &= ~BNAD_CF_MSIX;
2311 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2313 if (cfg_flags & BNAD_CF_MSIX) {
2314 pci_disable_msix(bnad->pcidev);
2315 kfree(bnad->msix_table);
2316 bnad->msix_table = NULL;
2320 /* Netdev entry points */
2322 bnad_open(struct net_device *netdev)
2325 struct bnad *bnad = netdev_priv(netdev);
2326 struct bna_pause_config pause_config;
2328 unsigned long flags;
2330 mutex_lock(&bnad->conf_mutex);
2333 err = bnad_setup_tx(bnad, 0);
2338 err = bnad_setup_rx(bnad, 0);
2343 pause_config.tx_pause = 0;
2344 pause_config.rx_pause = 0;
2346 mtu = ETH_HLEN + bnad->netdev->mtu + ETH_FCS_LEN;
2348 spin_lock_irqsave(&bnad->bna_lock, flags);
2349 bna_port_mtu_set(&bnad->bna.port, mtu, NULL);
2350 bna_port_pause_config(&bnad->bna.port, &pause_config, NULL);
2351 bna_port_enable(&bnad->bna.port);
2352 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2354 /* Enable broadcast */
2355 bnad_enable_default_bcast(bnad);
2357 /* Set the UCAST address */
2358 spin_lock_irqsave(&bnad->bna_lock, flags);
2359 bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
2360 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2362 /* Start the stats timer */
2363 bnad_stats_timer_start(bnad);
2365 mutex_unlock(&bnad->conf_mutex);
2370 bnad_cleanup_tx(bnad, 0);
2373 mutex_unlock(&bnad->conf_mutex);
2378 bnad_stop(struct net_device *netdev)
2380 struct bnad *bnad = netdev_priv(netdev);
2381 unsigned long flags;
2383 mutex_lock(&bnad->conf_mutex);
2385 /* Stop the stats timer */
2386 bnad_stats_timer_stop(bnad);
2388 init_completion(&bnad->bnad_completions.port_comp);
2390 spin_lock_irqsave(&bnad->bna_lock, flags);
2391 bna_port_disable(&bnad->bna.port, BNA_HARD_CLEANUP,
2392 bnad_cb_port_disabled);
2393 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2395 wait_for_completion(&bnad->bnad_completions.port_comp);
2397 bnad_cleanup_tx(bnad, 0);
2398 bnad_cleanup_rx(bnad, 0);
2400 /* Synchronize mailbox IRQ */
2401 bnad_mbox_irq_sync(bnad);
2403 mutex_unlock(&bnad->conf_mutex);
2410 * bnad_start_xmit : Netdev entry point for Transmit
2411 * Called under lock held by net_device
2414 bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
2416 struct bnad *bnad = netdev_priv(netdev);
2418 u16 txq_prod, vlan_tag = 0;
2419 u32 unmap_prod, wis, wis_used, wi_range;
2420 u32 vectors, vect_id, i, acked;
2424 struct bnad_tx_info *tx_info;
2425 struct bna_tcb *tcb;
2426 struct bnad_unmap_q *unmap_q;
2427 dma_addr_t dma_addr;
2428 struct bna_txq_entry *txqent;
2429 bna_txq_wi_ctrl_flag_t flags;
2432 (skb->len <= ETH_HLEN || skb->len > BFI_TX_MAX_DATA_PER_PKT)) {
2434 return NETDEV_TX_OK;
2438 * Takes care of the Tx that is scheduled between clearing the flag
2439 * and the netif_stop_queue() call.
2441 if (unlikely(!test_bit(BNAD_RF_TX_STARTED, &bnad->run_flags))) {
2443 return NETDEV_TX_OK;
2448 tx_info = &bnad->tx_info[tx_id];
2449 tcb = tx_info->tcb[tx_id];
2450 unmap_q = tcb->unmap_q;
2452 vectors = 1 + skb_shinfo(skb)->nr_frags;
2453 if (vectors > BFI_TX_MAX_VECTORS_PER_PKT) {
2455 return NETDEV_TX_OK;
2457 wis = BNA_TXQ_WI_NEEDED(vectors); /* 4 vectors per work item */
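/*
 * Illustrative example: an skb with 5 fragments needs vectors = 1 + 5 = 6;
 * with 4 vectors per work item that is wis = 2 (the remaining 2 vectors
 * go into an extension WI, see BNA_TXQ_WI_EXTENSION below).
 */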
2460 (wis > BNA_QE_FREE_CNT(tcb, tcb->q_depth) ||
2461 vectors > BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth))) {
2462 if ((u16) (*tcb->hw_consumer_index) !=
2463 tcb->consumer_index &&
2464 !test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
2465 acked = bnad_free_txbufs(bnad, tcb);
2466 bna_ib_ack(tcb->i_dbell, acked);
2467 smp_mb__before_clear_bit();
2468 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
2470 netif_stop_queue(netdev);
2471 BNAD_UPDATE_CTR(bnad, netif_queue_stop);
2476 * Check again to close the race between netif_stop_queue()
2477 * here and netif_wake_queue() in the interrupt handler,
2478 * which does not run under the netif tx lock.
2481 (wis > BNA_QE_FREE_CNT(tcb, tcb->q_depth) ||
2482 vectors > BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth))) {
2483 BNAD_UPDATE_CTR(bnad, netif_queue_stop);
2484 return NETDEV_TX_BUSY;
2486 netif_wake_queue(netdev);
2487 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
2491 unmap_prod = unmap_q->producer_index;
2496 txq_prod = tcb->producer_index;
2497 BNA_TXQ_QPGE_PTR_GET(txq_prod, tcb->sw_qpt, txqent, wi_range);
2498 BUG_ON(!(wi_range <= tcb->q_depth));
2499 txqent->hdr.wi.reserved = 0;
2500 txqent->hdr.wi.num_vectors = vectors;
2501 txqent->hdr.wi.opcode =
2502 htons((skb_is_gso(skb) ? BNA_TXQ_WI_SEND_LSO :
2505 if (bnad->vlan_grp && vlan_tx_tag_present(skb)) {
2506 vlan_tag = (u16) vlan_tx_tag_get(skb);
2507 flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
2509 if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags)) {
2511 (tcb->priority & 0x7) << 13 | (vlan_tag & 0x1fff);
2512 flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
2515 txqent->hdr.wi.vlan_tag = htons(vlan_tag);
2517 if (skb_is_gso(skb)) {
2518 err = bnad_tso_prepare(bnad, skb);
2521 return NETDEV_TX_OK;
2523 txqent->hdr.wi.lso_mss = htons(skb_is_gso(skb));
2524 flags |= (BNA_TXQ_WI_CF_IP_CKSUM | BNA_TXQ_WI_CF_TCP_CKSUM);
2525 txqent->hdr.wi.l4_hdr_size_n_offset =
2526 htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2527 (tcp_hdrlen(skb) >> 2,
2528 skb_transport_offset(skb)));
2529 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
2532 txqent->hdr.wi.lso_mss = 0;
2534 if (skb->protocol == htons(ETH_P_IP))
2535 proto = ip_hdr(skb)->protocol;
2536 else if (skb->protocol == htons(ETH_P_IPV6)) {
2537 /* nexthdr may not be TCP immediately. */
2538 proto = ipv6_hdr(skb)->nexthdr;
2540 if (proto == IPPROTO_TCP) {
2541 flags |= BNA_TXQ_WI_CF_TCP_CKSUM;
2542 txqent->hdr.wi.l4_hdr_size_n_offset =
2543 htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2544 (0, skb_transport_offset(skb)));
2546 BNAD_UPDATE_CTR(bnad, tcpcsum_offload);
2548 BUG_ON(!(skb_headlen(skb) >=
2549 skb_transport_offset(skb) + tcp_hdrlen(skb)));
		} else if (proto == IPPROTO_UDP) {
			flags |= BNA_TXQ_WI_CF_UDP_CKSUM;
			txqent->hdr.wi.l4_hdr_size_n_offset =
				htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
				      (0, skb_transport_offset(skb)));

			BNAD_UPDATE_CTR(bnad, udpcsum_offload);

			BUG_ON(!(skb_headlen(skb) >=
				 skb_transport_offset(skb) +
				 sizeof(struct udphdr)));
		} else {
			/* Hardware cannot offload; checksum in software */
			err = skb_checksum_help(skb);
			BNAD_UPDATE_CTR(bnad, csum_help);
			if (err) {
				dev_kfree_skb(skb);
				BNAD_UPDATE_CTR(bnad, csum_help_err);
				return NETDEV_TX_OK;
			}
		}
	} else {
		txqent->hdr.wi.lso_mss = 0;
		txqent->hdr.wi.l4_hdr_size_n_offset = 0;
	}

	txqent->hdr.wi.flags = htons(flags);

	txqent->hdr.wi.frame_length = htonl(skb->len);
	/* Map the linear part of the skb as the first Tx vector */
	unmap_q->unmap_array[unmap_prod].skb = skb;
	BUG_ON(!(skb_headlen(skb) <= BFI_TX_MAX_DATA_PER_VECTOR));
	txqent->vector[vect_id].length = htons(skb_headlen(skb));
	dma_addr = pci_map_single(bnad->pcidev, skb->data, skb_headlen(skb),
				  PCI_DMA_TODEVICE);
	pci_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
			   dma_addr);

	BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
	BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
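	/*
	 * Each work item carries up to BFI_TX_MAX_VECTORS_PER_WI (4) Tx
	 * vectors. When a fragment below does not fit into the current
	 * work item, the loop opens a BNA_TXQ_WI_EXTENSION work item and
	 * keeps filling vectors there; wis_used counts how many work
	 * items the frame has consumed, so the producer index can be
	 * advanced once after the loop.
	 */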
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		u32 size = frag->size;

		if (++vect_id == BFI_TX_MAX_VECTORS_PER_WI) {
			vect_id = 0;
			if (--wi_range)
				txqent++;
			else {
				BNA_QE_INDX_ADD(txq_prod, wis_used,
						tcb->q_depth);
				wis_used = 0;
				BNA_TXQ_QPGE_PTR_GET(txq_prod, tcb->sw_qpt,
						     txqent, wi_range);
				BUG_ON(!(wi_range <= tcb->q_depth));
			}
			wis_used++;
			txqent->hdr.wi_ext.opcode = htons(BNA_TXQ_WI_EXTENSION);
		}

		BUG_ON(!(size <= BFI_TX_MAX_DATA_PER_VECTOR));
		txqent->vector[vect_id].length = htons(size);
		dma_addr =
			pci_map_page(bnad->pcidev, frag->page,
				     frag->page_offset, size,
				     PCI_DMA_TODEVICE);
		pci_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
				   dma_addr);
		BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
		BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
	}
	unmap_q->producer_index = unmap_prod;
	BNA_QE_INDX_ADD(txq_prod, wis_used, tcb->q_depth);
	tcb->producer_index = txq_prod;

	/* Make all queue writes visible before ringing the doorbell */
	smp_mb();
	bna_txq_prod_indx_doorbell(tcb);

	if ((u16) (*tcb->hw_consumer_index) != tcb->consumer_index)
		tasklet_schedule(&bnad->tx_free_tasklet);

	return NETDEV_TX_OK;
}
/*
 * Used spin_lock to synchronize reading of stats structures, which
 * is written by BNA under the same lock.
 */
static struct rtnl_link_stats64 *
bnad_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
{
	struct bnad *bnad = netdev_priv(netdev);
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);

	bnad_netdev_qstats_fill(bnad, stats);
	bnad_netdev_hwstats_fill(bnad, stats);

	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	return stats;
}
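/*
 * The stats callback above merges two sources into one rtnl structure:
 * per-queue software counters (bnad_netdev_qstats_fill) and counters
 * collected from the adapter by BNA (bnad_netdev_hwstats_fill). Both
 * are read under bna_lock so the snapshot stays consistent.
 */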
static void
bnad_set_rx_mode(struct net_device *netdev)
{
	struct bnad *bnad = netdev_priv(netdev);
	u32 new_mask, valid_mask;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);

	new_mask = valid_mask = 0;

	if (netdev->flags & IFF_PROMISC) {
		if (!(bnad->cfg_flags & BNAD_CF_PROMISC)) {
			new_mask = BNAD_RXMODE_PROMISC_DEFAULT;
			valid_mask = BNAD_RXMODE_PROMISC_DEFAULT;
			bnad->cfg_flags |= BNAD_CF_PROMISC;
		}
	} else {
		if (bnad->cfg_flags & BNAD_CF_PROMISC) {
			new_mask = ~BNAD_RXMODE_PROMISC_DEFAULT;
			valid_mask = BNAD_RXMODE_PROMISC_DEFAULT;
			bnad->cfg_flags &= ~BNAD_CF_PROMISC;
		}
	}

	if (netdev->flags & IFF_ALLMULTI) {
		if (!(bnad->cfg_flags & BNAD_CF_ALLMULTI)) {
			new_mask |= BNA_RXMODE_ALLMULTI;
			valid_mask |= BNA_RXMODE_ALLMULTI;
			bnad->cfg_flags |= BNAD_CF_ALLMULTI;
		}
	} else {
		if (bnad->cfg_flags & BNAD_CF_ALLMULTI) {
			new_mask &= ~BNA_RXMODE_ALLMULTI;
			valid_mask |= BNA_RXMODE_ALLMULTI;
			bnad->cfg_flags &= ~BNAD_CF_ALLMULTI;
		}
	}

	bna_rx_mode_set(bnad->rx_info[0].rx, new_mask, valid_mask, NULL);
	if (!netdev_mc_empty(netdev)) {
		u8 *mcaddr_list;
		int mc_count = netdev_mc_count(netdev);

		/* Index 0 holds the broadcast address */
		mcaddr_list =
			kzalloc((mc_count + 1) * ETH_ALEN,
				GFP_ATOMIC);
		if (!mcaddr_list)
			goto unlock;

		memcpy(&mcaddr_list[0], &bnad_bcast_addr[0], ETH_ALEN);

		/* Copy rest of the MC addresses */
		bnad_netdev_mc_list_get(netdev, mcaddr_list);

		bna_rx_mcast_listset(bnad->rx_info[0].rx, mc_count + 1,
				     mcaddr_list, NULL);

		/* Should we enable BNAD_CF_ALLMULTI for err != 0 ? */
		kfree(mcaddr_list);
	}
unlock:
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}
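/*
 * A sketch of the rx-mode delta scheme used above, for illustration:
 * valid_mask selects which mode bits bna_rx_mode_set() should act on,
 * while new_mask carries their new values. For example, when
 * promiscuous mode is being turned off, valid_mask includes the
 * promiscuous bits but new_mask has them cleared, so only that mode
 * changes and the ALLMULTI state is left untouched.
 */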
/*
 * bna_lock is used to sync writes to netdev->dev_addr;
 * conf_mutex cannot be used since this call may be made
 * in a non-blocking context.
 */
static int
bnad_set_mac_address(struct net_device *netdev, void *mac_addr)
{
	int err;
	struct bnad *bnad = netdev_priv(netdev);
	struct sockaddr *sa = (struct sockaddr *)mac_addr;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);

	err = bnad_mac_addr_set_locked(bnad, sa->sa_data);

	if (!err)
		memcpy(netdev->dev_addr, sa->sa_data, netdev->addr_len);

	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	return err;
}
static int
bnad_change_mtu(struct net_device *netdev, int new_mtu)
{
	int mtu, err = 0;
	unsigned long flags;

	struct bnad *bnad = netdev_priv(netdev);

	if (new_mtu + ETH_HLEN < ETH_ZLEN || new_mtu > BNAD_JUMBO_MTU)
		return -EINVAL;

	mutex_lock(&bnad->conf_mutex);

	netdev->mtu = new_mtu;

	/* The hardware MTU includes the Ethernet header and FCS */
	mtu = ETH_HLEN + new_mtu + ETH_FCS_LEN;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_port_mtu_set(&bnad->bna.port, mtu, NULL);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	mutex_unlock(&bnad->conf_mutex);
	return err;
}
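/*
 * Illustration only (interface name is hypothetical): the range check
 * above admits anything from the runt-protecting minimum up to jumbo
 * frames, so a user-space request such as
 *
 *	ip link set dev eth0 mtu 9000
 *
 * succeeds as long as 9000 <= BNAD_JUMBO_MTU, and the hardware is then
 * programmed with 9000 + ETH_HLEN + ETH_FCS_LEN bytes.
 */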
static void
bnad_vlan_rx_register(struct net_device *netdev,
		      struct vlan_group *vlan_grp)
{
	struct bnad *bnad = netdev_priv(netdev);

	mutex_lock(&bnad->conf_mutex);
	bnad->vlan_grp = vlan_grp;
	mutex_unlock(&bnad->conf_mutex);
}
static void
bnad_vlan_rx_add_vid(struct net_device *netdev,
		     unsigned short vid)
{
	struct bnad *bnad = netdev_priv(netdev);
	unsigned long flags;

	/* Rx path may not have been set up yet */
	if (!bnad->rx_info[0].rx)
		return;

	mutex_lock(&bnad->conf_mutex);

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_rx_vlan_add(bnad->rx_info[0].rx, vid);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	mutex_unlock(&bnad->conf_mutex);
}
static void
bnad_vlan_rx_kill_vid(struct net_device *netdev,
		      unsigned short vid)
{
	struct bnad *bnad = netdev_priv(netdev);
	unsigned long flags;

	if (!bnad->rx_info[0].rx)
		return;

	mutex_lock(&bnad->conf_mutex);

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_rx_vlan_del(bnad->rx_info[0].rx, vid);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	mutex_unlock(&bnad->conf_mutex);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void
bnad_netpoll(struct net_device *netdev)
{
	struct bnad *bnad = netdev_priv(netdev);
	struct bnad_rx_info *rx_info;
	struct bnad_rx_ctrl *rx_ctrl;
	u32 curr_mask;
	int i, j;

	if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
		/* INTx mode: mask, service and re-enable the one IRQ */
		bna_intx_disable(&bnad->bna, curr_mask);
		bnad_isr(bnad->pcidev->irq, netdev);
		bna_intx_enable(&bnad->bna, curr_mask);
	} else {
		/* MSI-X mode: schedule polling on every active Rx path */
		for (i = 0; i < bnad->num_rx; i++) {
			rx_info = &bnad->rx_info[i];
			if (!rx_info->rx)
				continue;
			for (j = 0; j < bnad->num_rxp_per_rx; j++) {
				rx_ctrl = &rx_info->rx_ctrl[j];
				if (rx_ctrl->ccb) {
					bnad_disable_rx_irq(bnad,
							    rx_ctrl->ccb);
					bnad_netif_rx_schedule_poll(bnad,
							    rx_ctrl->ccb);
				}
			}
		}
	}
}
#endif
static const struct net_device_ops bnad_netdev_ops = {
	.ndo_open		= bnad_open,
	.ndo_stop		= bnad_stop,
	.ndo_start_xmit		= bnad_start_xmit,
	.ndo_get_stats64	= bnad_get_stats64,
	.ndo_set_rx_mode	= bnad_set_rx_mode,
	.ndo_set_multicast_list	= bnad_set_rx_mode,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= bnad_set_mac_address,
	.ndo_change_mtu		= bnad_change_mtu,
	.ndo_vlan_rx_register	= bnad_vlan_rx_register,
	.ndo_vlan_rx_add_vid	= bnad_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= bnad_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= bnad_netpoll
#endif
};
static void
bnad_netdev_init(struct bnad *bnad, bool using_dac)
{
	struct net_device *netdev = bnad->netdev;

	netdev->features |= NETIF_F_IPV6_CSUM;
	netdev->features |= NETIF_F_TSO;
	netdev->features |= NETIF_F_TSO6;

	netdev->features |= NETIF_F_GRO;
	pr_warn("bna: GRO enabled, using kernel stack GRO\n");

	netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;

	if (using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	netdev->features |=
		NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
		NETIF_F_HW_VLAN_FILTER;

	netdev->vlan_features = netdev->features;
	netdev->mem_start = bnad->mmio_start;
	netdev->mem_end = bnad->mmio_start + bnad->mmio_len - 1;

	netdev->netdev_ops = &bnad_netdev_ops;
	bnad_set_ethtool_ops(netdev);
}
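/*
 * Illustration only (interface name is hypothetical): the offload bits
 * set above are what user space sees through ethtool, e.g.
 *
 *	ethtool -k eth0
 *
 * should report scatter-gather, TCP segmentation offload and checksum
 * offload as enabled once the device is registered.
 */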
/*
 * 1. Initialize the bnad structure
 * 2. Setup netdev pointer in pci_dev
 * 3. Initialize Tx free tasklet
 * 4. Initialize no. of TxQ & CQs & MSIX vectors
 */
static int
bnad_init(struct bnad *bnad,
	  struct pci_dev *pdev, struct net_device *netdev)
{
	unsigned long flags;

	SET_NETDEV_DEV(netdev, &pdev->dev);
	pci_set_drvdata(pdev, netdev);

	bnad->netdev = netdev;
	bnad->pcidev = pdev;
	bnad->mmio_start = pci_resource_start(pdev, 0);
	bnad->mmio_len = pci_resource_len(pdev, 0);
	bnad->bar0 = ioremap_nocache(bnad->mmio_start, bnad->mmio_len);
	if (!bnad->bar0) {
		dev_err(&pdev->dev, "ioremap for bar0 failed\n");
		pci_set_drvdata(pdev, NULL);
		return -ENOMEM;
	}
	pr_info("bar0 mapped to %p, len %llu\n", bnad->bar0,
		(unsigned long long) bnad->mmio_len);

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (!bnad_msix_disable)
		bnad->cfg_flags = BNAD_CF_MSIX;

	bnad->cfg_flags |= BNAD_CF_DIM_ENABLED;

	bnad_q_num_init(bnad);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	/* One vector per TxQ, one per RxP, plus the mailbox */
	bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx) +
		(bnad->num_rx * bnad->num_rxp_per_rx) +
		BNAD_MAILBOX_MSIX_VECTORS;
	bnad->msix_diag_num = 2;	/* 1 for Tx, 1 for Rx */

	bnad->txq_depth = BNAD_TXQ_DEPTH;
	bnad->rxq_depth = BNAD_RXQ_DEPTH;
	bnad->rx_csum = true;

	bnad->tx_coalescing_timeo = BFI_TX_COALESCING_TIMEO;
	bnad->rx_coalescing_timeo = BFI_RX_COALESCING_TIMEO;

	tasklet_init(&bnad->tx_free_tasklet, bnad_tx_free_tasklet,
		     (unsigned long)bnad);

	return 0;
}
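/*
 * Worked example of the MSI-X budget above (values hypothetical): with
 * num_tx = 1, num_txq_per_tx = 8, num_rx = 1, num_rxp_per_rx = 8 and
 * BNAD_MAILBOX_MSIX_VECTORS = 1, msix_num = 8 + 8 + 1 = 17 vectors are
 * later requested from the PCI core by bnad_enable_msix().
 */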
/*
 * Must be called after bnad_pci_uninit()
 * so that iounmap() and pci_set_drvdata(NULL)
 * happen only after PCI uninitialization.
 */
static void
bnad_uninit(struct bnad *bnad)
{
	if (bnad->bar0)
		iounmap(bnad->bar0);
	pci_set_drvdata(bnad->pcidev, NULL);
}
/*
 * Initialize locks
 *	a) Per-device mutex used for serializing configuration
 *	   changes from OS interface
 *	b) spin lock used to protect bna state machine
 */
static void
bnad_lock_init(struct bnad *bnad)
{
	spin_lock_init(&bnad->bna_lock);
	mutex_init(&bnad->conf_mutex);
}

static void
bnad_lock_uninit(struct bnad *bnad)
{
	mutex_destroy(&bnad->conf_mutex);
}
/* PCI Initialization */
static int
bnad_pci_init(struct bnad *bnad,
	      struct pci_dev *pdev, bool *using_dac)
{
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;
	err = pci_request_regions(pdev, BNAD_NAME);
	if (err)
		goto disable_device;
	/* Prefer a 64-bit DMA mask; fall back to 32-bit if unavailable */
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
	    !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
		*using_dac = 1;
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			err = pci_set_consistent_dma_mask(pdev,
							  DMA_BIT_MASK(32));
			if (err)
				goto release_regions;
		}
		*using_dac = 0;
	}
	pci_set_master(pdev);
	return 0;

release_regions:
	pci_release_regions(pdev);
disable_device:
	pci_disable_device(pdev);

	return err;
}
static void
bnad_pci_uninit(struct pci_dev *pdev)
{
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}
static int __devinit
bnad_pci_probe(struct pci_dev *pdev,
	       const struct pci_device_id *pcidev_id)
{
	bool using_dac;
	int err;
	struct bnad *bnad;
	struct bna *bna;
	struct net_device *netdev;
	struct bfa_pcidev pcidev_info;
	unsigned long flags;

	pr_info("bnad_pci_probe : (0x%p, 0x%p) PCI Func : (%d)\n",
		pdev, pcidev_id, PCI_FUNC(pdev->devfn));

	mutex_lock(&bnad_fwimg_mutex);
	if (!cna_get_firmware_buf(pdev)) {
		mutex_unlock(&bnad_fwimg_mutex);
		pr_warn("Failed to load Firmware Image!\n");
		return -ENODEV;
	}
	mutex_unlock(&bnad_fwimg_mutex);

	/*
	 * Allocates a net_device with sizeof(struct bnad) of private
	 * area; bnad = netdev_priv(netdev)
	 */
	netdev = alloc_etherdev(sizeof(struct bnad));
	if (!netdev) {
		dev_err(&pdev->dev, "alloc_etherdev failed\n");
		err = -ENOMEM;
		return err;
	}
	bnad = netdev_priv(netdev);
	/*
	 * PCI initialization
	 *	Output : using_dac = 1 for 64 bit DMA
	 *			   = 0 for 32 bit DMA
	 */
	err = bnad_pci_init(bnad, pdev, &using_dac);
	if (err)
		goto free_netdev;

	bnad_lock_init(bnad);
	/*
	 * Initialize bnad structure
	 * Setup relation between pci_dev & netdev
	 * Init Tx free tasklet
	 */
	err = bnad_init(bnad, pdev, netdev);
	if (err)
		goto pci_uninit;
	/* Initialize netdev structure, set up ethtool ops */
	bnad_netdev_init(bnad, using_dac);

	bnad_enable_msix(bnad);

	/* Get resource requirement from bna */
	bna_res_req(&bnad->res_info[0]);

	/* Allocate resources from bna */
	err = bnad_res_alloc(bnad);
	if (err)
		goto free_netdev;

	bna = &bnad->bna;

	/* Setup pcidev_info for bna_init() */
	pcidev_info.pci_slot = PCI_SLOT(bnad->pcidev->devfn);
	pcidev_info.pci_func = PCI_FUNC(bnad->pcidev->devfn);
	pcidev_info.device_id = bnad->pcidev->device;
	pcidev_info.pci_bar_kva = bnad->bar0;
	mutex_lock(&bnad->conf_mutex);

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_init(bna, bnad, &pcidev_info, &bnad->res_info[0]);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	bnad->stats.bna_stats = &bna->stats;

	/* Set up the IOC timers */
	setup_timer(&bnad->bna.device.ioc.ioc_timer, bnad_ioc_timeout,
		    ((unsigned long)bnad));
	setup_timer(&bnad->bna.device.ioc.hb_timer, bnad_ioc_hb_check,
		    ((unsigned long)bnad));
	setup_timer(&bnad->bna.device.ioc.sem_timer, bnad_ioc_sem_timeout,
		    ((unsigned long)bnad));

	/* Now start the timer before calling IOC */
	mod_timer(&bnad->bna.device.ioc.ioc_timer,
		  jiffies + msecs_to_jiffies(BNA_IOC_TIMER_FREQ));
	/*
	 * Start the chip.
	 * Don't care even if err != 0; the bna state machine will
	 * deal with it.
	 */
	err = bnad_device_enable(bnad);

	/* Get the burnt-in mac */
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_port_mac_get(&bna->port, &bnad->perm_addr);
	bnad_set_netdev_perm_addr(bnad);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	mutex_unlock(&bnad->conf_mutex);

	/*
	 * Make sure the link appears down to the stack
	 */
	netif_carrier_off(netdev);

	/* Finally, register with the net_device layer */
	err = register_netdev(netdev);
	if (err) {
		pr_err("BNA : Registering with netdev failed\n");
		goto disable_device;
	}

	return 0;
disable_device:
	mutex_lock(&bnad->conf_mutex);
	bnad_device_disable(bnad);
	del_timer_sync(&bnad->bna.device.ioc.ioc_timer);
	del_timer_sync(&bnad->bna.device.ioc.sem_timer);
	del_timer_sync(&bnad->bna.device.ioc.hb_timer);
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_uninit(bna);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	mutex_unlock(&bnad->conf_mutex);

	bnad_res_free(bnad);
	bnad_disable_msix(bnad);
pci_uninit:
	bnad_pci_uninit(pdev);
	bnad_lock_uninit(bnad);
	bnad_uninit(bnad);
free_netdev:
	free_netdev(netdev);

	return err;
}
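/*
 * The error labels in bnad_pci_probe() above unwind in reverse order
 * of setup: disable_device tears down BNA and its IOC timers,
 * pci_uninit releases PCI resources and locks, and free_netdev drops
 * the net_device. Note that bnad_uninit() runs after
 * bnad_pci_uninit(), matching the ordering requirement documented at
 * its definition.
 */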
static void __devexit
bnad_pci_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct bnad *bnad;
	struct bna *bna;
	unsigned long flags;

	if (!netdev)
		return;

	pr_info("%s bnad_pci_remove\n", netdev->name);
	bnad = netdev_priv(netdev);
	bna = &bnad->bna;

	unregister_netdev(netdev);

	mutex_lock(&bnad->conf_mutex);
	bnad_device_disable(bnad);
	del_timer_sync(&bnad->bna.device.ioc.ioc_timer);
	del_timer_sync(&bnad->bna.device.ioc.sem_timer);
	del_timer_sync(&bnad->bna.device.ioc.hb_timer);
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_uninit(bna);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	mutex_unlock(&bnad->conf_mutex);

	bnad_res_free(bnad);
	bnad_disable_msix(bnad);
	bnad_pci_uninit(pdev);
	bnad_lock_uninit(bnad);
	bnad_uninit(bnad);
	free_netdev(netdev);
}
const struct pci_device_id bnad_pci_id_table[] = {
	{
		PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
			   PCI_DEVICE_ID_BROCADE_CT),
		.class = PCI_CLASS_NETWORK_ETHERNET << 8,
		.class_mask = 0xffff00
	},
	{0, }
};

MODULE_DEVICE_TABLE(pci, bnad_pci_id_table);
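/*
 * The single table entry matches the Brocade CT device by vendor and
 * device ID, further qualified by the Ethernet class code. Exporting
 * the table via MODULE_DEVICE_TABLE lets udev/modprobe autoload this
 * driver when such a device is discovered.
 */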
static struct pci_driver bnad_pci_driver = {
	.name = BNAD_NAME,
	.id_table = bnad_pci_id_table,
	.probe = bnad_pci_probe,
	.remove = __devexit_p(bnad_pci_remove),
};

static int __init
bnad_module_init(void)
{
	int err;

	pr_info("Brocade 10G Ethernet driver\n");

	bfa_nw_ioc_auto_recover(bnad_ioc_auto_recover);

	err = pci_register_driver(&bnad_pci_driver);
	if (err < 0) {
		pr_err("bna : PCI registration failed in module init "
		       "(%d)\n", err);
		return err;
	}

	return err;
}
static void __exit
bnad_module_exit(void)
{
	pci_unregister_driver(&bnad_pci_driver);

	if (bfi_fw)
		release_firmware(bfi_fw);
}
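/*
 * Note: the firmware image is fetched once per module lifetime via
 * cna_get_firmware_buf() during probe (under bnad_fwimg_mutex) and,
 * presumably cached in bfi_fw, is only handed back to the firmware
 * loader here at module exit.
 */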
module_init(bnad_module_init);
module_exit(bnad_module_exit);
MODULE_AUTHOR("Brocade");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Brocade 10G PCIe Ethernet driver");
MODULE_VERSION(BNAD_VERSION);
MODULE_FIRMWARE(CNA_FW_FILE_CT);