drivers/net/bna/bnad.c
1 /*
2  * Linux network driver for Brocade Converged Network Adapter.
3  *
4  * This program is free software; you can redistribute it and/or modify it
5  * under the terms of the GNU General Public License (GPL) Version 2 as
6  * published by the Free Software Foundation
7  *
8  * This program is distributed in the hope that it will be useful, but
9  * WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  */
13 /*
14  * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
15  * All rights reserved
16  * www.brocade.com
17  */
18 #include <linux/bitops.h>
19 #include <linux/netdevice.h>
20 #include <linux/skbuff.h>
21 #include <linux/etherdevice.h>
22 #include <linux/in.h>
23 #include <linux/ethtool.h>
24 #include <linux/if_vlan.h>
25 #include <linux/if_ether.h>
26 #include <linux/ip.h>
27 #include <linux/prefetch.h>
28
29 #include "bnad.h"
30 #include "bna.h"
31 #include "cna.h"
32
33 static DEFINE_MUTEX(bnad_fwimg_mutex);
34
35 /*
36  * Module params
37  */
38 static uint bnad_msix_disable;
39 module_param(bnad_msix_disable, uint, 0444);
40 MODULE_PARM_DESC(bnad_msix_disable, "Disable MSIX mode");
41
42 static uint bnad_ioc_auto_recover = 1;
43 module_param(bnad_ioc_auto_recover, uint, 0444);
44 MODULE_PARM_DESC(bnad_ioc_auto_recover, "Enable / Disable auto recovery");
45
46 /*
47  * Global variables
48  */
49 u32 bnad_rxqs_per_cq = 2;
50
51 static const u8 bnad_bcast_addr[] =  {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
52
53 /*
54  * Local MACROS
55  */
56 #define BNAD_TX_UNMAPQ_DEPTH (bnad->txq_depth * 2)
57
58 #define BNAD_RX_UNMAPQ_DEPTH (bnad->rxq_depth)
59
60 #define BNAD_GET_MBOX_IRQ(_bnad)                                \
61         (((_bnad)->cfg_flags & BNAD_CF_MSIX) ?                  \
62          ((_bnad)->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector) : \
63          ((_bnad)->pcidev->irq))
64
65 #define BNAD_FILL_UNMAPQ_MEM_REQ(_res_info, _num, _depth)       \
66 do {                                                            \
67         (_res_info)->res_type = BNA_RES_T_MEM;                  \
68         (_res_info)->res_u.mem_info.mem_type = BNA_MEM_T_KVA;   \
69         (_res_info)->res_u.mem_info.num = (_num);               \
70         (_res_info)->res_u.mem_info.len =                       \
71         sizeof(struct bnad_unmap_q) +                           \
72         (sizeof(struct bnad_skb_unmap) * ((_depth) - 1));       \
73 } while (0)
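/*
 * Sizing note (inferred from the macro above): the length assumes that
 * struct bnad_unmap_q ends in a one-entry unmap_array[], so the base
 * sizeof() already includes one struct bnad_skb_unmap and only
 * (_depth - 1) extra entries need to be added.
 */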
74
75 #define BNAD_TXRX_SYNC_MDELAY   250     /* 250 msecs */
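/*
 * BNAD_TXRX_SYNC_MDELAY is used by the Tx/Rx cleanup callbacks below:
 * after the *_STARTED bits are cleared, a single mdelay() gives any
 * in-flight datapath handlers time to observe the cleared bits and
 * drain before the queues are torn down.
 */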
76
77 /*
78  * Reinitialize completions in the CQ once Rx is taken down
79  */
80 static void
81 bnad_cq_cmpl_init(struct bnad *bnad, struct bna_ccb *ccb)
82 {
83         struct bna_cq_entry *cmpl, *next_cmpl;
84         unsigned int wi_range, wis = 0, ccb_prod = 0;
85         int i;
86
87         BNA_CQ_QPGE_PTR_GET(ccb_prod, ccb->sw_qpt, cmpl,
88                             wi_range);
89
90         for (i = 0; i < ccb->q_depth; i++) {
91                 wis++;
92                 if (likely(--wi_range))
93                         next_cmpl = cmpl + 1;
94                 else {
95                         BNA_QE_INDX_ADD(ccb_prod, wis, ccb->q_depth);
96                         wis = 0;
97                         BNA_CQ_QPGE_PTR_GET(ccb_prod, ccb->sw_qpt,
98                                                 next_cmpl, wi_range);
99                 }
100                 cmpl->valid = 0;
101                 cmpl = next_cmpl;
102         }
103 }
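/*
 * A note on the BNA_*_QPGE_PTR_GET() pattern used above and throughout
 * the datapath: the queues are built from pages tracked by a software
 * queue page table (sw_qpt). Each call yields a pointer to the entry at
 * the given index plus wi_range, the number of contiguous entries left
 * on that page; once wi_range reaches zero, the next page has to be
 * looked up through the macro again.
 */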
104
105 /*
106  * Frees all pending Tx Bufs
107  * At this point no activity is expected on the Q,
108  * so DMA unmap & freeing is fine.
109  */
110 static void
111 bnad_free_all_txbufs(struct bnad *bnad,
112                  struct bna_tcb *tcb)
113 {
114         u32             unmap_cons;
115         struct bnad_unmap_q *unmap_q = tcb->unmap_q;
116         struct bnad_skb_unmap *unmap_array;
117         struct sk_buff          *skb = NULL;
118         int                     i;
119
120         unmap_array = unmap_q->unmap_array;
121
122         unmap_cons = 0;
123         while (unmap_cons < unmap_q->q_depth) {
124                 skb = unmap_array[unmap_cons].skb;
125                 if (!skb) {
126                         unmap_cons++;
127                         continue;
128                 }
129                 unmap_array[unmap_cons].skb = NULL;
130
131                 dma_unmap_single(&bnad->pcidev->dev,
132                                  dma_unmap_addr(&unmap_array[unmap_cons],
133                                                 dma_addr), skb_headlen(skb),
134                                                 DMA_TO_DEVICE);
135
136                 dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
137                 if (++unmap_cons >= unmap_q->q_depth)
138                         break;
139
140                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
141                         dma_unmap_page(&bnad->pcidev->dev,
142                                        dma_unmap_addr(&unmap_array[unmap_cons],
143                                                       dma_addr),
144                                        skb_shinfo(skb)->frags[i].size,
145                                        DMA_TO_DEVICE);
146                         dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
147                                            0);
148                         if (++unmap_cons >= unmap_q->q_depth)
149                                 break;
150                 }
151                 dev_kfree_skb_any(skb);
152         }
153 }
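/*
 * Unmap-array layout implied by the loop above: every skb takes one
 * entry for its linear head followed by one entry per page fragment,
 * which is why the consumer index is advanced between the
 * dma_unmap_single() call and the per-fragment dma_unmap_page() calls.
 */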
154
155 /* Data Path Handlers */
156
157 /*
158  * bnad_free_txbufs : Frees the Tx bufs on Tx completion
159  * Can be called in a) Interrupt context
160  *                  b) Sending context
161  *                  c) Tasklet context
162  */
163 static u32
164 bnad_free_txbufs(struct bnad *bnad,
165                  struct bna_tcb *tcb)
166 {
167         u32             sent_packets = 0, sent_bytes = 0;
168         u16             wis, unmap_cons, updated_hw_cons;
169         struct bnad_unmap_q *unmap_q = tcb->unmap_q;
170         struct bnad_skb_unmap *unmap_array;
171         struct sk_buff          *skb;
172         int i;
173
174         /*
175          * Just return if TX is stopped. This check is useful
176          * when bnad_free_txbufs() runs from a tasklet that was
177          * scheduled before bnad_cb_tx_cleanup() cleared the
178          * BNAD_TXQ_TX_STARTED bit, but actually executes after
179          * the cleanup has run.
180          */
181         if (!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
182                 return 0;
183
184         updated_hw_cons = *(tcb->hw_consumer_index);
185
186         wis = BNA_Q_INDEX_CHANGE(tcb->consumer_index,
187                                   updated_hw_cons, tcb->q_depth);
188
189         BUG_ON(!(wis <= BNA_QE_IN_USE_CNT(tcb, tcb->q_depth)));
190
191         unmap_array = unmap_q->unmap_array;
192         unmap_cons = unmap_q->consumer_index;
193
194         prefetch(&unmap_array[unmap_cons + 1]);
195         while (wis) {
196                 skb = unmap_array[unmap_cons].skb;
197
198                 unmap_array[unmap_cons].skb = NULL;
199
200                 sent_packets++;
201                 sent_bytes += skb->len;
202                 wis -= BNA_TXQ_WI_NEEDED(1 + skb_shinfo(skb)->nr_frags);
203
204                 dma_unmap_single(&bnad->pcidev->dev,
205                                  dma_unmap_addr(&unmap_array[unmap_cons],
206                                                 dma_addr), skb_headlen(skb),
207                                  DMA_TO_DEVICE);
208                 dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
209                 BNA_QE_INDX_ADD(unmap_cons, 1, unmap_q->q_depth);
210
211                 prefetch(&unmap_array[unmap_cons + 1]);
212                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
213                         prefetch(&unmap_array[unmap_cons + 1]);
214
215                         dma_unmap_page(&bnad->pcidev->dev,
216                                        dma_unmap_addr(&unmap_array[unmap_cons],
217                                                       dma_addr),
218                                        skb_shinfo(skb)->frags[i].size,
219                                        DMA_TO_DEVICE);
220                         dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
221                                            0);
222                         BNA_QE_INDX_ADD(unmap_cons, 1, unmap_q->q_depth);
223                 }
224                 dev_kfree_skb_any(skb);
225         }
226
227         /* Update consumer pointers. */
228         tcb->consumer_index = updated_hw_cons;
229         unmap_q->consumer_index = unmap_cons;
230
231         tcb->txq->tx_packets += sent_packets;
232         tcb->txq->tx_bytes += sent_bytes;
233
234         return sent_packets;
235 }
236
237 /* Tx Free Tasklet function */
238 /* Frees completed Tx buffers for all the tcbs in all the Txs */
239 /*
240  * Scheduled from the sending context, so that
241  * the fat Tx lock is not held for too long
242  * in the sending context.
243  */
244 static void
245 bnad_tx_free_tasklet(unsigned long bnad_ptr)
246 {
247         struct bnad *bnad = (struct bnad *)bnad_ptr;
248         struct bna_tcb *tcb;
249         u32             acked = 0;
250         int                     i, j;
251
252         for (i = 0; i < bnad->num_tx; i++) {
253                 for (j = 0; j < bnad->num_txq_per_tx; j++) {
254                         tcb = bnad->tx_info[i].tcb[j];
255                         if (!tcb)
256                                 continue;
257                         if (((u16) (*tcb->hw_consumer_index) !=
258                                 tcb->consumer_index) &&
259                                 (!test_and_set_bit(BNAD_TXQ_FREE_SENT,
260                                                   &tcb->flags))) {
261                                 acked = bnad_free_txbufs(bnad, tcb);
262                                 if (likely(test_bit(BNAD_TXQ_TX_STARTED,
263                                         &tcb->flags)))
264                                         bna_ib_ack(tcb->i_dbell, acked);
265                                 smp_mb__before_clear_bit();
266                                 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
267                         }
268                         if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED,
269                                                 &tcb->flags)))
270                                 continue;
271                         if (netif_queue_stopped(bnad->netdev)) {
272                                 if (acked && netif_carrier_ok(bnad->netdev) &&
273                                         BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
274                                                 BNAD_NETIF_WAKE_THRESHOLD) {
275                                         netif_wake_queue(bnad->netdev);
276                                         /* TODO */
277                                         /* Counters for individual TxQs? */
278                                         BNAD_UPDATE_CTR(bnad,
279                                                 netif_queue_wakeup);
280                                 }
281                         }
282                 }
283         }
284 }
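/*
 * BNAD_TXQ_FREE_SENT serves as a per-tcb mutual-exclusion bit: this
 * tasklet and bnad_tx() below both claim it with test_and_set_bit() so
 * that only one context frees Tx buffers for a given tcb at a time,
 * while bnad_cb_tcb_destroy() and bnad_cb_tx_resume() spin on the same
 * bit to wait out an in-progress free before touching the queue.
 */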
285
286 static u32
287 bnad_tx(struct bnad *bnad, struct bna_tcb *tcb)
288 {
289         struct net_device *netdev = bnad->netdev;
290         u32 sent = 0;
291
292         if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
293                 return 0;
294
295         sent = bnad_free_txbufs(bnad, tcb);
296         if (sent) {
297                 if (netif_queue_stopped(netdev) &&
298                     netif_carrier_ok(netdev) &&
299                     BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
300                                     BNAD_NETIF_WAKE_THRESHOLD) {
301                         if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) {
302                                 netif_wake_queue(netdev);
303                                 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
304                         }
305                 }
306         }
307
308         if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
309                 bna_ib_ack(tcb->i_dbell, sent);
310
311         smp_mb__before_clear_bit();
312         clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
313
314         return sent;
315 }
316
317 /* MSIX Tx Completion Handler */
318 static irqreturn_t
319 bnad_msix_tx(int irq, void *data)
320 {
321         struct bna_tcb *tcb = (struct bna_tcb *)data;
322         struct bnad *bnad = tcb->bnad;
323
324         bnad_tx(bnad, tcb);
325
326         return IRQ_HANDLED;
327 }
328
329 static void
330 bnad_reset_rcb(struct bnad *bnad, struct bna_rcb *rcb)
331 {
332         struct bnad_unmap_q *unmap_q = rcb->unmap_q;
333
334         rcb->producer_index = 0;
335         rcb->consumer_index = 0;
336
337         unmap_q->producer_index = 0;
338         unmap_q->consumer_index = 0;
339 }
340
341 static void
342 bnad_free_all_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
343 {
344         struct bnad_unmap_q *unmap_q;
345         struct bnad_skb_unmap *unmap_array;
346         struct sk_buff *skb;
347         int unmap_cons;
348
349         unmap_q = rcb->unmap_q;
350         unmap_array = unmap_q->unmap_array;
351         for (unmap_cons = 0; unmap_cons < unmap_q->q_depth; unmap_cons++) {
352                 skb = unmap_array[unmap_cons].skb;
353                 if (!skb)
354                         continue;
355                 unmap_array[unmap_cons].skb = NULL;
356                 dma_unmap_single(&bnad->pcidev->dev,
357                                  dma_unmap_addr(&unmap_array[unmap_cons],
358                                                 dma_addr),
359                                  rcb->rxq->buffer_size,
360                                  DMA_FROM_DEVICE);
361                 dev_kfree_skb(skb);
362         }
363         bnad_reset_rcb(bnad, rcb);
364 }
365
366 static void
367 bnad_alloc_n_post_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
368 {
369         u16 to_alloc, alloced, unmap_prod, wi_range;
370         struct bnad_unmap_q *unmap_q = rcb->unmap_q;
371         struct bnad_skb_unmap *unmap_array;
372         struct bna_rxq_entry *rxent;
373         struct sk_buff *skb;
374         dma_addr_t dma_addr;
375
376         alloced = 0;
377         to_alloc =
378                 BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth);
379
380         unmap_array = unmap_q->unmap_array;
381         unmap_prod = unmap_q->producer_index;
382
383         BNA_RXQ_QPGE_PTR_GET(unmap_prod, rcb->sw_qpt, rxent, wi_range);
384
385         while (to_alloc--) {
386                 if (!wi_range) {
387                         BNA_RXQ_QPGE_PTR_GET(unmap_prod, rcb->sw_qpt, rxent,
388                                              wi_range);
389                 }
390                 skb = netdev_alloc_skb_ip_align(bnad->netdev,
391                                                 rcb->rxq->buffer_size);
392                 if (unlikely(!skb)) {
393                         BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed);
394                         goto finishing;
395                 }
396                 unmap_array[unmap_prod].skb = skb;
397                 dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
398                                           rcb->rxq->buffer_size,
399                                           DMA_FROM_DEVICE);
400                 dma_unmap_addr_set(&unmap_array[unmap_prod], dma_addr,
401                                    dma_addr);
402                 BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
403                 BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
404
405                 rxent++;
406                 wi_range--;
407                 alloced++;
408         }
409
410 finishing:
411         if (likely(alloced)) {
412                 unmap_q->producer_index = unmap_prod;
413                 rcb->producer_index = unmap_prod;
414                 smp_mb();
415                 if (likely(test_bit(BNAD_RXQ_STARTED, &rcb->flags)))
416                         bna_rxq_prod_indx_doorbell(rcb);
417         }
418 }
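/*
 * The smp_mb() above orders the producer-index updates before the
 * BNAD_RXQ_STARTED test, so the doorbell is rung only for a queue that
 * is still started at that point.
 */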
419
420 static inline void
421 bnad_refill_rxq(struct bnad *bnad, struct bna_rcb *rcb)
422 {
423         struct bnad_unmap_q *unmap_q = rcb->unmap_q;
424
425         if (!test_and_set_bit(BNAD_RXQ_REFILL, &rcb->flags)) {
426                 if (BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth)
427                          >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT)
428                         bnad_alloc_n_post_rxbufs(bnad, rcb);
429                 smp_mb__before_clear_bit();
430                 clear_bit(BNAD_RXQ_REFILL, &rcb->flags);
431         }
432 }
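/*
 * The shift acts as a cheap threshold test: the right-shifted free
 * count is non-zero only once at least 2^BNAD_RXQ_REFILL_THRESHOLD_SHIFT
 * entries are free, so refills are batched rather than posted one
 * buffer at a time.
 */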
433
434 static u32
435 bnad_poll_cq(struct bnad *bnad, struct bna_ccb *ccb, int budget)
436 {
437         struct bna_cq_entry *cmpl, *next_cmpl;
438         struct bna_rcb *rcb = NULL;
439         unsigned int wi_range, packets = 0, wis = 0;
440         struct bnad_unmap_q *unmap_q;
441         struct bnad_skb_unmap *unmap_array;
442         struct sk_buff *skb;
443         u32 flags, unmap_cons;
444         u32 qid0 = ccb->rcb[0]->rxq->rxq_id;
445         struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate;
446
447         if (!test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags))
448                 return 0;
449
450         prefetch(bnad->netdev);
451         BNA_CQ_QPGE_PTR_GET(ccb->producer_index, ccb->sw_qpt, cmpl,
452                             wi_range);
453         BUG_ON(!(wi_range <= ccb->q_depth));
454         while (cmpl->valid && packets < budget) {
455                 packets++;
456                 BNA_UPDATE_PKT_CNT(pkt_rt, ntohs(cmpl->length));
457
458                 if (qid0 == cmpl->rxq_id)
459                         rcb = ccb->rcb[0];
460                 else
461                         rcb = ccb->rcb[1];
462
463                 unmap_q = rcb->unmap_q;
464                 unmap_array = unmap_q->unmap_array;
465                 unmap_cons = unmap_q->consumer_index;
466
467                 skb = unmap_array[unmap_cons].skb;
468                 BUG_ON(!(skb));
469                 unmap_array[unmap_cons].skb = NULL;
470                 dma_unmap_single(&bnad->pcidev->dev,
471                                  dma_unmap_addr(&unmap_array[unmap_cons],
472                                                 dma_addr),
473                                  rcb->rxq->buffer_size,
474                                  DMA_FROM_DEVICE);
475                 BNA_QE_INDX_ADD(unmap_q->consumer_index, 1, unmap_q->q_depth);
476
477                 /* Should be more efficient ? Performance ? */
478                 BNA_QE_INDX_ADD(rcb->consumer_index, 1, rcb->q_depth);
479
480                 wis++;
481                 if (likely(--wi_range))
482                         next_cmpl = cmpl + 1;
483                 else {
484                         BNA_QE_INDX_ADD(ccb->producer_index, wis, ccb->q_depth);
485                         wis = 0;
486                         BNA_CQ_QPGE_PTR_GET(ccb->producer_index, ccb->sw_qpt,
487                                                 next_cmpl, wi_range);
488                         BUG_ON(!(wi_range <= ccb->q_depth));
489                 }
490                 prefetch(next_cmpl);
491
492                 flags = ntohl(cmpl->flags);
493                 if (unlikely
494                     (flags &
495                      (BNA_CQ_EF_MAC_ERROR | BNA_CQ_EF_FCS_ERROR |
496                       BNA_CQ_EF_TOO_LONG))) {
497                         dev_kfree_skb_any(skb);
498                         rcb->rxq->rx_packets_with_error++;
499                         goto next;
500                 }
501
502                 skb_put(skb, ntohs(cmpl->length));
503                 if (likely
504                     ((bnad->netdev->features & NETIF_F_RXCSUM) &&
505                      (((flags & BNA_CQ_EF_IPV4) &&
506                       (flags & BNA_CQ_EF_L3_CKSUM_OK)) ||
507                       (flags & BNA_CQ_EF_IPV6)) &&
508                       (flags & (BNA_CQ_EF_TCP | BNA_CQ_EF_UDP)) &&
509                       (flags & BNA_CQ_EF_L4_CKSUM_OK)))
510                         skb->ip_summed = CHECKSUM_UNNECESSARY;
511                 else
512                         skb_checksum_none_assert(skb);
513
514                 rcb->rxq->rx_packets++;
515                 rcb->rxq->rx_bytes += skb->len;
516                 skb->protocol = eth_type_trans(skb, bnad->netdev);
517
518                 if (flags & BNA_CQ_EF_VLAN)
519                         __vlan_hwaccel_put_tag(skb, ntohs(cmpl->vlan_tag));
520
521                 if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
522                         struct bnad_rx_ctrl *rx_ctrl;
523
524                         rx_ctrl = (struct bnad_rx_ctrl *) ccb->ctrl;
525                         napi_gro_receive(&rx_ctrl->napi, skb);
526                 } else {
527                         netif_receive_skb(skb);
528                 }
529
530 next:
531                 cmpl->valid = 0;
532                 cmpl = next_cmpl;
533         }
534
535         BNA_QE_INDX_ADD(ccb->producer_index, wis, ccb->q_depth);
536
537         if (likely(ccb)) {
538                 if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
539                         bna_ib_ack(ccb->i_dbell, packets);
540                 bnad_refill_rxq(bnad, ccb->rcb[0]);
541                 if (ccb->rcb[1])
542                         bnad_refill_rxq(bnad, ccb->rcb[1]);
543         } else {
544                 if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
545                         bna_ib_ack(ccb->i_dbell, 0);
546         }
547
548         return packets;
549 }
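/*
 * Note: the "likely(ccb)" test above appears to be vestigial -- ccb is
 * dereferenced unconditionally at the top of this function, so the
 * else branch is unreachable in practice.
 */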
550
551 static void
552 bnad_disable_rx_irq(struct bnad *bnad, struct bna_ccb *ccb)
553 {
554         if (unlikely(!test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
555                 return;
556
557         bna_ib_coalescing_timer_set(ccb->i_dbell, 0);
558         bna_ib_ack(ccb->i_dbell, 0);
559 }
560
561 static void
562 bnad_enable_rx_irq(struct bnad *bnad, struct bna_ccb *ccb)
563 {
564         unsigned long flags;
565
566         /* Because of polling context */
567         spin_lock_irqsave(&bnad->bna_lock, flags);
568         bnad_enable_rx_irq_unsafe(ccb);
569         spin_unlock_irqrestore(&bnad->bna_lock, flags);
570 }
571
572 static void
573 bnad_netif_rx_schedule_poll(struct bnad *bnad, struct bna_ccb *ccb)
574 {
575         struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl);
576         struct napi_struct *napi = &rx_ctrl->napi;
577
578         if (likely(napi_schedule_prep(napi))) {
579                 bnad_disable_rx_irq(bnad, ccb);
580                 __napi_schedule(napi);
581         }
582         BNAD_UPDATE_CTR(bnad, netif_rx_schedule);
583 }
584
585 /* MSIX Rx Path Handler */
586 static irqreturn_t
587 bnad_msix_rx(int irq, void *data)
588 {
589         struct bna_ccb *ccb = (struct bna_ccb *)data;
590         struct bnad *bnad = ccb->bnad;
591
592         bnad_netif_rx_schedule_poll(bnad, ccb);
593
594         return IRQ_HANDLED;
595 }
596
597 /* Interrupt handlers */
598
599 /* Mbox Interrupt Handlers */
600 static irqreturn_t
601 bnad_msix_mbox_handler(int irq, void *data)
602 {
603         u32 intr_status;
604         unsigned long flags;
605         struct bnad *bnad = (struct bnad *)data;
606
607         if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags)))
608                 return IRQ_HANDLED;
609
610         spin_lock_irqsave(&bnad->bna_lock, flags);
611
612         bna_intr_status_get(&bnad->bna, intr_status);
613
614         if (BNA_IS_MBOX_ERR_INTR(intr_status))
615                 bna_mbox_handler(&bnad->bna, intr_status);
616
617         spin_unlock_irqrestore(&bnad->bna_lock, flags);
618
619         return IRQ_HANDLED;
620 }
621
622 static irqreturn_t
623 bnad_isr(int irq, void *data)
624 {
625         int i, j;
626         u32 intr_status;
627         unsigned long flags;
628         struct bnad *bnad = (struct bnad *)data;
629         struct bnad_rx_info *rx_info;
630         struct bnad_rx_ctrl *rx_ctrl;
631
632         if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags)))
633                 return IRQ_NONE;
634
635         bna_intr_status_get(&bnad->bna, intr_status);
636
637         if (unlikely(!intr_status))
638                 return IRQ_NONE;
639
640         spin_lock_irqsave(&bnad->bna_lock, flags);
641
642         if (BNA_IS_MBOX_ERR_INTR(intr_status))
643                 bna_mbox_handler(&bnad->bna, intr_status);
644
645         spin_unlock_irqrestore(&bnad->bna_lock, flags);
646
647         if (!BNA_IS_INTX_DATA_INTR(intr_status))
648                 return IRQ_HANDLED;
649
650         /* Process data interrupts */
651         /* Tx processing */
652         for (i = 0; i < bnad->num_tx; i++) {
653                 for (j = 0; j < bnad->num_txq_per_tx; j++)
654                         bnad_tx(bnad, bnad->tx_info[i].tcb[j]);
655         }
656         /* Rx processing */
657         for (i = 0; i < bnad->num_rx; i++) {
658                 rx_info = &bnad->rx_info[i];
659                 if (!rx_info->rx)
660                         continue;
661                 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
662                         rx_ctrl = &rx_info->rx_ctrl[j];
663                         if (rx_ctrl->ccb)
664                                 bnad_netif_rx_schedule_poll(bnad,
665                                                             rx_ctrl->ccb);
666                 }
667         }
668         return IRQ_HANDLED;
669 }
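/*
 * INTx dispatch summary: mailbox events are handled first under
 * bna_lock; if the status also carries datapath bits, Tx completions
 * are reaped directly while Rx work is deferred to NAPI via
 * bnad_netif_rx_schedule_poll().
 */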
670
671 /*
672  * Called in interrupt / callback context
673  * with bna_lock held, so cfg_flags access is OK
674  */
675 static void
676 bnad_enable_mbox_irq(struct bnad *bnad)
677 {
678         clear_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
679
680         BNAD_UPDATE_CTR(bnad, mbox_intr_enabled);
681 }
682
683 /*
684  * Called with bnad->bna_lock held because of
685  * bnad->cfg_flags access.
686  */
687 static void
688 bnad_disable_mbox_irq(struct bnad *bnad)
689 {
690         set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
691
692         BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
693 }
694
695 static void
696 bnad_set_netdev_perm_addr(struct bnad *bnad)
697 {
698         struct net_device *netdev = bnad->netdev;
699
700         memcpy(netdev->perm_addr, &bnad->perm_addr, netdev->addr_len);
701         if (is_zero_ether_addr(netdev->dev_addr))
702                 memcpy(netdev->dev_addr, &bnad->perm_addr, netdev->addr_len);
703 }
704
705 /* Control Path Handlers */
706
707 /* Callbacks */
708 void
709 bnad_cb_device_enable_mbox_intr(struct bnad *bnad)
710 {
711         bnad_enable_mbox_irq(bnad);
712 }
713
714 void
715 bnad_cb_device_disable_mbox_intr(struct bnad *bnad)
716 {
717         bnad_disable_mbox_irq(bnad);
718 }
719
720 void
721 bnad_cb_device_enabled(struct bnad *bnad, enum bna_cb_status status)
722 {
723         complete(&bnad->bnad_completions.ioc_comp);
724         bnad->bnad_completions.ioc_comp_status = status;
725 }
726
727 void
728 bnad_cb_device_disabled(struct bnad *bnad, enum bna_cb_status status)
729 {
730         complete(&bnad->bnad_completions.ioc_comp);
731         bnad->bnad_completions.ioc_comp_status = status;
732 }
733
734 static void
735 bnad_cb_port_disabled(void *arg, enum bna_cb_status status)
736 {
737         struct bnad *bnad = (struct bnad *)arg;
738
739         complete(&bnad->bnad_completions.port_comp);
740
741         netif_carrier_off(bnad->netdev);
742 }
743
744 void
745 bnad_cb_port_link_status(struct bnad *bnad,
746                         enum bna_link_status link_status)
747 {
748         bool link_up = false;
749
750         link_up = (link_status == BNA_LINK_UP) || (link_status == BNA_CEE_UP);
751
752         if (link_status == BNA_CEE_UP) {
753                 set_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
754                 BNAD_UPDATE_CTR(bnad, cee_up);
755         } else
756                 clear_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
757
758         if (link_up) {
759                 if (!netif_carrier_ok(bnad->netdev)) {
760                         struct bna_tcb *tcb = bnad->tx_info[0].tcb[0];
761                         if (!tcb)
762                                 return;
763                         pr_warn("bna: %s link up\n",
764                                 bnad->netdev->name);
765                         netif_carrier_on(bnad->netdev);
766                         BNAD_UPDATE_CTR(bnad, link_toggle);
767                         if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) {
768                                 /* Force an immediate Transmit Schedule */
769                                 pr_info("bna: %s TX_STARTED\n",
770                                         bnad->netdev->name);
771                                 netif_wake_queue(bnad->netdev);
772                                 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
773                         } else {
774                                 netif_stop_queue(bnad->netdev);
775                                 BNAD_UPDATE_CTR(bnad, netif_queue_stop);
776                         }
777                 }
778         } else {
779                 if (netif_carrier_ok(bnad->netdev)) {
780                         pr_warn("bna: %s link down\n",
781                                 bnad->netdev->name);
782                         netif_carrier_off(bnad->netdev);
783                         BNAD_UPDATE_CTR(bnad, link_toggle);
784                 }
785         }
786 }
787
788 static void
789 bnad_cb_tx_disabled(void *arg, struct bna_tx *tx,
790                         enum bna_cb_status status)
791 {
792         struct bnad *bnad = (struct bnad *)arg;
793
794         complete(&bnad->bnad_completions.tx_comp);
795 }
796
797 static void
798 bnad_cb_tcb_setup(struct bnad *bnad, struct bna_tcb *tcb)
799 {
800         struct bnad_tx_info *tx_info =
801                         (struct bnad_tx_info *)tcb->txq->tx->priv;
802         struct bnad_unmap_q *unmap_q = tcb->unmap_q;
803
804         tx_info->tcb[tcb->id] = tcb;
805         unmap_q->producer_index = 0;
806         unmap_q->consumer_index = 0;
807         unmap_q->q_depth = BNAD_TX_UNMAPQ_DEPTH;
808 }
809
810 static void
811 bnad_cb_tcb_destroy(struct bnad *bnad, struct bna_tcb *tcb)
812 {
813         struct bnad_tx_info *tx_info =
814                         (struct bnad_tx_info *)tcb->txq->tx->priv;
815         struct bnad_unmap_q *unmap_q = tcb->unmap_q;
816
817         while (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
818                 cpu_relax();
819
820         bnad_free_all_txbufs(bnad, tcb);
821
822         unmap_q->producer_index = 0;
823         unmap_q->consumer_index = 0;
824
825         smp_mb__before_clear_bit();
826         clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
827
828         tx_info->tcb[tcb->id] = NULL;
829 }
830
831 static void
832 bnad_cb_rcb_setup(struct bnad *bnad, struct bna_rcb *rcb)
833 {
834         struct bnad_unmap_q *unmap_q = rcb->unmap_q;
835
836         unmap_q->producer_index = 0;
837         unmap_q->consumer_index = 0;
838         unmap_q->q_depth = BNAD_RX_UNMAPQ_DEPTH;
839 }
840
841 static void
842 bnad_cb_rcb_destroy(struct bnad *bnad, struct bna_rcb *rcb)
843 {
844         bnad_free_all_rxbufs(bnad, rcb);
845 }
846
847 static void
848 bnad_cb_ccb_setup(struct bnad *bnad, struct bna_ccb *ccb)
849 {
850         struct bnad_rx_info *rx_info =
851                         (struct bnad_rx_info *)ccb->cq->rx->priv;
852
853         rx_info->rx_ctrl[ccb->id].ccb = ccb;
854         ccb->ctrl = &rx_info->rx_ctrl[ccb->id];
855 }
856
857 static void
858 bnad_cb_ccb_destroy(struct bnad *bnad, struct bna_ccb *ccb)
859 {
860         struct bnad_rx_info *rx_info =
861                         (struct bnad_rx_info *)ccb->cq->rx->priv;
862
863         rx_info->rx_ctrl[ccb->id].ccb = NULL;
864 }
865
866 static void
867 bnad_cb_tx_stall(struct bnad *bnad, struct bna_tcb *tcb)
868 {
869         struct bnad_tx_info *tx_info =
870                         (struct bnad_tx_info *)tcb->txq->tx->priv;
871
872         if (tx_info != &bnad->tx_info[0])
873                 return;
874
875         clear_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
876         netif_stop_queue(bnad->netdev);
877         pr_info("bna: %s TX_STOPPED\n", bnad->netdev->name);
878 }
879
880 static void
881 bnad_cb_tx_resume(struct bnad *bnad, struct bna_tcb *tcb)
882 {
883         struct bnad_unmap_q *unmap_q = tcb->unmap_q;
884
885         if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
886                 return;
887
888         clear_bit(BNAD_RF_TX_SHUTDOWN_DELAYED, &bnad->run_flags);
889
890         while (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
891                 cpu_relax();
892
893         bnad_free_all_txbufs(bnad, tcb);
894
895         unmap_q->producer_index = 0;
896         unmap_q->consumer_index = 0;
897
898         smp_mb__before_clear_bit();
899         clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
900
901         /*
902          * Workaround: if the first device enable failed, we are
903          * left with a zero MAC address. Try to get the MAC
904          * address again here.
905          */
906         if (is_zero_ether_addr(&bnad->perm_addr.mac[0])) {
907                 bna_port_mac_get(&bnad->bna.port, &bnad->perm_addr);
908                 bnad_set_netdev_perm_addr(bnad);
909         }
910
911         set_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
912
913         if (netif_carrier_ok(bnad->netdev)) {
914                 pr_info("bna: %s TX_STARTED\n", bnad->netdev->name);
915                 netif_wake_queue(bnad->netdev);
916                 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
917         }
918 }
919
920 static void
921 bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tcb *tcb)
922 {
923         /* Delay only once for the whole Tx Path Shutdown */
924         if (!test_and_set_bit(BNAD_RF_TX_SHUTDOWN_DELAYED, &bnad->run_flags))
925                 mdelay(BNAD_TXRX_SYNC_MDELAY);
926 }
927
928 static void
929 bnad_cb_rx_cleanup(struct bnad *bnad,
930                         struct bna_ccb *ccb)
931 {
932         clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags);
933
934         if (ccb->rcb[1])
935                 clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[1]->flags);
936
937         if (!test_and_set_bit(BNAD_RF_RX_SHUTDOWN_DELAYED, &bnad->run_flags))
938                 mdelay(BNAD_TXRX_SYNC_MDELAY);
939 }
940
941 static void
942 bnad_cb_rx_post(struct bnad *bnad, struct bna_rcb *rcb)
943 {
944         struct bnad_unmap_q *unmap_q = rcb->unmap_q;
945
946         clear_bit(BNAD_RF_RX_SHUTDOWN_DELAYED, &bnad->run_flags);
947
948         if (rcb == rcb->cq->ccb->rcb[0])
949                 bnad_cq_cmpl_init(bnad, rcb->cq->ccb);
950
951         bnad_free_all_rxbufs(bnad, rcb);
952
953         set_bit(BNAD_RXQ_STARTED, &rcb->flags);
954
955         /* Now allocate & post buffers for this RCB */
956         /* !!Allocation in callback context */
957         if (!test_and_set_bit(BNAD_RXQ_REFILL, &rcb->flags)) {
958                 if (BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth)
959                          >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT)
960                         bnad_alloc_n_post_rxbufs(bnad, rcb);
961                 smp_mb__before_clear_bit();
962                 clear_bit(BNAD_RXQ_REFILL, &rcb->flags);
963         }
964 }
965
966 static void
967 bnad_cb_rx_disabled(void *arg, struct bna_rx *rx,
968                         enum bna_cb_status status)
969 {
970         struct bnad *bnad = (struct bnad *)arg;
971
972         complete(&bnad->bnad_completions.rx_comp);
973 }
974
975 static void
976 bnad_cb_rx_mcast_add(struct bnad *bnad, struct bna_rx *rx,
977                                 enum bna_cb_status status)
978 {
979         bnad->bnad_completions.mcast_comp_status = status;
980         complete(&bnad->bnad_completions.mcast_comp);
981 }
982
983 void
984 bnad_cb_stats_get(struct bnad *bnad, enum bna_cb_status status,
985                        struct bna_stats *stats)
986 {
987         if (status == BNA_CB_SUCCESS)
988                 BNAD_UPDATE_CTR(bnad, hw_stats_updates);
989
990         if (!netif_running(bnad->netdev) ||
991                 !test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
992                 return;
993
994         mod_timer(&bnad->stats_timer,
995                   jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
996 }
997
998 /* Resource allocation, free functions */
999
1000 static void
1001 bnad_mem_free(struct bnad *bnad,
1002               struct bna_mem_info *mem_info)
1003 {
1004         int i;
1005         dma_addr_t dma_pa;
1006
1007         if (mem_info->mdl == NULL)
1008                 return;
1009
1010         for (i = 0; i < mem_info->num; i++) {
1011                 if (mem_info->mdl[i].kva != NULL) {
1012                         if (mem_info->mem_type == BNA_MEM_T_DMA) {
1013                                 BNA_GET_DMA_ADDR(&(mem_info->mdl[i].dma),
1014                                                 dma_pa);
1015                                 dma_free_coherent(&bnad->pcidev->dev,
1016                                                   mem_info->mdl[i].len,
1017                                                   mem_info->mdl[i].kva, dma_pa);
1018                         } else
1019                                 kfree(mem_info->mdl[i].kva);
1020                 }
1021         }
1022         kfree(mem_info->mdl);
1023         mem_info->mdl = NULL;
1024 }
1025
1026 static int
1027 bnad_mem_alloc(struct bnad *bnad,
1028                struct bna_mem_info *mem_info)
1029 {
1030         int i;
1031         dma_addr_t dma_pa;
1032
1033         if ((mem_info->num == 0) || (mem_info->len == 0)) {
1034                 mem_info->mdl = NULL;
1035                 return 0;
1036         }
1037
1038         mem_info->mdl = kcalloc(mem_info->num, sizeof(struct bna_mem_descr),
1039                                 GFP_KERNEL);
1040         if (mem_info->mdl == NULL)
1041                 return -ENOMEM;
1042
1043         if (mem_info->mem_type == BNA_MEM_T_DMA) {
1044                 for (i = 0; i < mem_info->num; i++) {
1045                         mem_info->mdl[i].len = mem_info->len;
1046                         mem_info->mdl[i].kva =
1047                                 dma_alloc_coherent(&bnad->pcidev->dev,
1048                                                 mem_info->len, &dma_pa,
1049                                                 GFP_KERNEL);
1050
1051                         if (mem_info->mdl[i].kva == NULL)
1052                                 goto err_return;
1053
1054                         BNA_SET_DMA_ADDR(dma_pa,
1055                                          &(mem_info->mdl[i].dma));
1056                 }
1057         } else {
1058                 for (i = 0; i < mem_info->num; i++) {
1059                         mem_info->mdl[i].len = mem_info->len;
1060                         mem_info->mdl[i].kva = kzalloc(mem_info->len,
1061                                                         GFP_KERNEL);
1062                         if (mem_info->mdl[i].kva == NULL)
1063                                 goto err_return;
1064                 }
1065         }
1066
1067         return 0;
1068
1069 err_return:
1070         bnad_mem_free(bnad, mem_info);
1071         return -ENOMEM;
1072 }
1073
1074 /* Free IRQ for Mailbox */
1075 static void
1076 bnad_mbox_irq_free(struct bnad *bnad,
1077                    struct bna_intr_info *intr_info)
1078 {
1079         int irq;
1080         unsigned long flags;
1081
1082         if (intr_info->idl == NULL)
1083                 return;
1084
1085         spin_lock_irqsave(&bnad->bna_lock, flags);
1086         bnad_disable_mbox_irq(bnad);
1087         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1088
1089         irq = BNAD_GET_MBOX_IRQ(bnad);
1090         free_irq(irq, bnad);
1091
1092         kfree(intr_info->idl);
1093 }
1094
1095 /*
1096  * Allocates IRQ for Mailbox, but keeps it disabled
1097  * This will be enabled once we get the mbox enable callback
1098  * from bna
1099  */
1100 static int
1101 bnad_mbox_irq_alloc(struct bnad *bnad,
1102                     struct bna_intr_info *intr_info)
1103 {
1104         int             err = 0;
1105         unsigned long   irq_flags, flags;
1106         u32     irq;
1107         irq_handler_t   irq_handler;
1108
1109         /* Mbox should use only 1 vector */
1110
1111         intr_info->idl = kzalloc(sizeof(*(intr_info->idl)), GFP_KERNEL);
1112         if (!intr_info->idl)
1113                 return -ENOMEM;
1114
1115         spin_lock_irqsave(&bnad->bna_lock, flags);
1116         if (bnad->cfg_flags & BNAD_CF_MSIX) {
1117                 irq_handler = (irq_handler_t)bnad_msix_mbox_handler;
1118                 irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector;
1119                 irq_flags = 0;
1120                 intr_info->intr_type = BNA_INTR_T_MSIX;
1121                 intr_info->idl[0].vector = BNAD_MAILBOX_MSIX_INDEX;
1122         } else {
1123                 irq_handler = (irq_handler_t)bnad_isr;
1124                 irq = bnad->pcidev->irq;
1125                 irq_flags = IRQF_SHARED;
1126                 intr_info->intr_type = BNA_INTR_T_INTX;
1127         }
1128
1129         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1130         sprintf(bnad->mbox_irq_name, "%s", BNAD_NAME);
1131
1132         /*
1133          * Set the Mbox IRQ disable flag, so that the IRQ handler
1134          * called from request_irq() for SHARED IRQs does not execute
1135          */
1136         set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
1137
1138         BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
1139
1140         err = request_irq(irq, irq_handler, irq_flags,
1141                           bnad->mbox_irq_name, bnad);
1142
1143         if (err) {
1144                 kfree(intr_info->idl);
1145                 intr_info->idl = NULL;
1146         }
1147
1148         return err;
1149 }
1150
1151 static void
1152 bnad_txrx_irq_free(struct bnad *bnad, struct bna_intr_info *intr_info)
1153 {
1154         kfree(intr_info->idl);
1155         intr_info->idl = NULL;
1156 }
1157
1158 /* Allocates Interrupt Descriptor List for MSIX/INT-X vectors */
1159 static int
1160 bnad_txrx_irq_alloc(struct bnad *bnad, enum bnad_intr_source src,
1161                     uint txrx_id, struct bna_intr_info *intr_info)
1162 {
1163         int i, vector_start = 0;
1164         u32 cfg_flags;
1165         unsigned long flags;
1166
1167         spin_lock_irqsave(&bnad->bna_lock, flags);
1168         cfg_flags = bnad->cfg_flags;
1169         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1170
1171         if (cfg_flags & BNAD_CF_MSIX) {
1172                 intr_info->intr_type = BNA_INTR_T_MSIX;
1173                 intr_info->idl = kcalloc(intr_info->num,
1174                                         sizeof(struct bna_intr_descr),
1175                                         GFP_KERNEL);
1176                 if (!intr_info->idl)
1177                         return -ENOMEM;
1178
1179                 switch (src) {
1180                 case BNAD_INTR_TX:
1181                         vector_start = BNAD_MAILBOX_MSIX_VECTORS + txrx_id;
1182                         break;
1183
1184                 case BNAD_INTR_RX:
1185                         vector_start = BNAD_MAILBOX_MSIX_VECTORS +
1186                                         (bnad->num_tx * bnad->num_txq_per_tx) +
1187                                         txrx_id;
1188                         break;
1189
1190                 default:
1191                         BUG();
1192                 }
1193
1194                 for (i = 0; i < intr_info->num; i++)
1195                         intr_info->idl[i].vector = vector_start + i;
1196         } else {
1197                 intr_info->intr_type = BNA_INTR_T_INTX;
1198                 intr_info->num = 1;
1199                 intr_info->idl = kcalloc(intr_info->num,
1200                                         sizeof(struct bna_intr_descr),
1201                                         GFP_KERNEL);
1202                 if (!intr_info->idl)
1203                         return -ENOMEM;
1204
1205                 switch (src) {
1206                 case BNAD_INTR_TX:
1207                         intr_info->idl[0].vector = BNAD_INTX_TX_IB_BITMASK;
1208                         break;
1209
1210                 case BNAD_INTR_RX:
1211                         intr_info->idl[0].vector = BNAD_INTX_RX_IB_BITMASK;
1212                         break;
1213                 }
1214         }
1215         return 0;
1216 }
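/*
 * Resulting MSIX vector layout, as encoded above: the first
 * BNAD_MAILBOX_MSIX_VECTORS vector(s) are reserved for the mailbox,
 * the next num_tx * num_txq_per_tx vectors belong to the TxQs, and
 * the Rx vectors follow after all Tx vectors.
 */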
1217
1218 /**
1219  * NOTE: Should be called for MSIX only
1220  * Unregisters Tx MSIX vector(s) from the kernel
1221  */
1222 static void
1223 bnad_tx_msix_unregister(struct bnad *bnad, struct bnad_tx_info *tx_info,
1224                         int num_txqs)
1225 {
1226         int i;
1227         int vector_num;
1228
1229         for (i = 0; i < num_txqs; i++) {
1230                 if (tx_info->tcb[i] == NULL)
1231                         continue;
1232
1233                 vector_num = tx_info->tcb[i]->intr_vector;
1234                 free_irq(bnad->msix_table[vector_num].vector, tx_info->tcb[i]);
1235         }
1236 }
1237
1238 /**
1239  * NOTE: Should be called for MSIX only
1240  * Registers Tx MSIX vector(s) and ISR(s), cookie with the kernel
1241  */
1242 static int
1243 bnad_tx_msix_register(struct bnad *bnad, struct bnad_tx_info *tx_info,
1244                         uint tx_id, int num_txqs)
1245 {
1246         int i;
1247         int err;
1248         int vector_num;
1249
1250         for (i = 0; i < num_txqs; i++) {
1251                 vector_num = tx_info->tcb[i]->intr_vector;
1252                 sprintf(tx_info->tcb[i]->name, "%s TXQ %d", bnad->netdev->name,
1253                                 tx_id + tx_info->tcb[i]->id);
1254                 err = request_irq(bnad->msix_table[vector_num].vector,
1255                                   (irq_handler_t)bnad_msix_tx, 0,
1256                                   tx_info->tcb[i]->name,
1257                                   tx_info->tcb[i]);
1258                 if (err)
1259                         goto err_return;
1260         }
1261
1262         return 0;
1263
1264 err_return:
1265         if (i > 0)      /* free the i vectors registered so far */
1266                 bnad_tx_msix_unregister(bnad, tx_info, i);
1267         return -1;
1268 }
1269
1270 /**
1271  * NOTE: Should be called for MSIX only
1272  * Unregisters Rx MSIX vector(s) from the kernel
1273  */
1274 static void
1275 bnad_rx_msix_unregister(struct bnad *bnad, struct bnad_rx_info *rx_info,
1276                         int num_rxps)
1277 {
1278         int i;
1279         int vector_num;
1280
1281         for (i = 0; i < num_rxps; i++) {
1282                 if (rx_info->rx_ctrl[i].ccb == NULL)
1283                         continue;
1284
1285                 vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
1286                 free_irq(bnad->msix_table[vector_num].vector,
1287                          rx_info->rx_ctrl[i].ccb);
1288         }
1289 }
1290
1291 /**
1292  * NOTE: Should be called for MSIX only
1293  * Registers Rx MSIX vector(s) and ISR(s), cookie with the kernel
1294  */
1295 static int
1296 bnad_rx_msix_register(struct bnad *bnad, struct bnad_rx_info *rx_info,
1297                         uint rx_id, int num_rxps)
1298 {
1299         int i;
1300         int err;
1301         int vector_num;
1302
1303         for (i = 0; i < num_rxps; i++) {
1304                 vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
1305                 sprintf(rx_info->rx_ctrl[i].ccb->name, "%s CQ %d",
1306                         bnad->netdev->name,
1307                         rx_id + rx_info->rx_ctrl[i].ccb->id);
1308                 err = request_irq(bnad->msix_table[vector_num].vector,
1309                                   (irq_handler_t)bnad_msix_rx, 0,
1310                                   rx_info->rx_ctrl[i].ccb->name,
1311                                   rx_info->rx_ctrl[i].ccb);
1312                 if (err)
1313                         goto err_return;
1314         }
1315
1316         return 0;
1317
1318 err_return:
1319         if (i > 0)      /* free the i vectors registered so far */
1320                 bnad_rx_msix_unregister(bnad, rx_info, i);
1321         return -1;
1322 }
1323
1324 /* Free Tx object Resources */
1325 static void
1326 bnad_tx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
1327 {
1328         int i;
1329
1330         for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
1331                 if (res_info[i].res_type == BNA_RES_T_MEM)
1332                         bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
1333                 else if (res_info[i].res_type == BNA_RES_T_INTR)
1334                         bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
1335         }
1336 }
1337
1338 /* Allocates memory and interrupt resources for Tx object */
1339 static int
1340 bnad_tx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
1341                   uint tx_id)
1342 {
1343         int i, err = 0;
1344
1345         for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
1346                 if (res_info[i].res_type == BNA_RES_T_MEM)
1347                         err = bnad_mem_alloc(bnad,
1348                                         &res_info[i].res_u.mem_info);
1349                 else if (res_info[i].res_type == BNA_RES_T_INTR)
1350                         err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_TX, tx_id,
1351                                         &res_info[i].res_u.intr_info);
1352                 if (err)
1353                         goto err_return;
1354         }
1355         return 0;
1356
1357 err_return:
1358         bnad_tx_res_free(bnad, res_info);
1359         return err;
1360 }
1361
1362 /* Free Rx object Resources */
1363 static void
1364 bnad_rx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
1365 {
1366         int i;
1367
1368         for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
1369                 if (res_info[i].res_type == BNA_RES_T_MEM)
1370                         bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
1371                 else if (res_info[i].res_type == BNA_RES_T_INTR)
1372                         bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
1373         }
1374 }
1375
1376 /* Allocates memory and interrupt resources for Rx object */
1377 static int
1378 bnad_rx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
1379                   uint rx_id)
1380 {
1381         int i, err = 0;
1382
1383         /* All memory needs to be allocated before setup_ccbs */
1384         for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
1385                 if (res_info[i].res_type == BNA_RES_T_MEM)
1386                         err = bnad_mem_alloc(bnad,
1387                                         &res_info[i].res_u.mem_info);
1388                 else if (res_info[i].res_type == BNA_RES_T_INTR)
1389                         err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_RX, rx_id,
1390                                         &res_info[i].res_u.intr_info);
1391                 if (err)
1392                         goto err_return;
1393         }
1394         return 0;
1395
1396 err_return:
1397         bnad_rx_res_free(bnad, res_info);
1398         return err;
1399 }
1400
1401 /* Timer callbacks */
1402 /* a) IOC timer */
1403 static void
1404 bnad_ioc_timeout(unsigned long data)
1405 {
1406         struct bnad *bnad = (struct bnad *)data;
1407         unsigned long flags;
1408
1409         spin_lock_irqsave(&bnad->bna_lock, flags);
1410         bfa_nw_ioc_timeout((void *) &bnad->bna.device.ioc);
1411         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1412 }
1413
1414 static void
1415 bnad_ioc_hb_check(unsigned long data)
1416 {
1417         struct bnad *bnad = (struct bnad *)data;
1418         unsigned long flags;
1419
1420         spin_lock_irqsave(&bnad->bna_lock, flags);
1421         bfa_nw_ioc_hb_check((void *) &bnad->bna.device.ioc);
1422         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1423 }
1424
1425 static void
1426 bnad_iocpf_timeout(unsigned long data)
1427 {
1428         struct bnad *bnad = (struct bnad *)data;
1429         unsigned long flags;
1430
1431         spin_lock_irqsave(&bnad->bna_lock, flags);
1432         bfa_nw_iocpf_timeout((void *) &bnad->bna.device.ioc);
1433         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1434 }
1435
1436 static void
1437 bnad_iocpf_sem_timeout(unsigned long data)
1438 {
1439         struct bnad *bnad = (struct bnad *)data;
1440         unsigned long flags;
1441
1442         spin_lock_irqsave(&bnad->bna_lock, flags);
1443         bfa_nw_iocpf_sem_timeout((void *) &bnad->bna.device.ioc);
1444         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1445 }
1446
1447 /*
1448  * All timer routines use bnad->bna_lock to protect against
1449  * the following race, which may occur in case of no locking:
1450  *      Time    CPU m   CPU n
1451  *      0       1 = test_bit
1452  *      1                       clear_bit
1453  *      2                       del_timer_sync
1454  *      3       mod_timer
1455  */
1456
1457 /* b) Dynamic Interrupt Moderation Timer */
1458 static void
1459 bnad_dim_timeout(unsigned long data)
1460 {
1461         struct bnad *bnad = (struct bnad *)data;
1462         struct bnad_rx_info *rx_info;
1463         struct bnad_rx_ctrl *rx_ctrl;
1464         int i, j;
1465         unsigned long flags;
1466
1467         if (!netif_carrier_ok(bnad->netdev))
1468                 return;
1469
1470         spin_lock_irqsave(&bnad->bna_lock, flags);
1471         for (i = 0; i < bnad->num_rx; i++) {
1472                 rx_info = &bnad->rx_info[i];
1473                 if (!rx_info->rx)
1474                         continue;
1475                 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
1476                         rx_ctrl = &rx_info->rx_ctrl[j];
1477                         if (!rx_ctrl->ccb)
1478                                 continue;
1479                         bna_rx_dim_update(rx_ctrl->ccb);
1480                 }
1481         }
1482
1483                 /* Check for BNAD_CF_DIM_ENABLED, does not eliminate a race */
1484         if (test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags))
1485                 mod_timer(&bnad->dim_timer,
1486                           jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
1487         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1488 }
1489
1490 /* c)  Statistics Timer */
1491 static void
1492 bnad_stats_timeout(unsigned long data)
1493 {
1494         struct bnad *bnad = (struct bnad *)data;
1495         unsigned long flags;
1496
1497         if (!netif_running(bnad->netdev) ||
1498                 !test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1499                 return;
1500
1501         spin_lock_irqsave(&bnad->bna_lock, flags);
1502         bna_stats_get(&bnad->bna);
1503         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1504 }
1505
1506 /*
1507  * Set up timer for DIM
1508  * Called with bnad->bna_lock held
1509  */
1510 void
1511 bnad_dim_timer_start(struct bnad *bnad)
1512 {
1513         if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
1514             !test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
1515                 setup_timer(&bnad->dim_timer, bnad_dim_timeout,
1516                             (unsigned long)bnad);
1517                 set_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
1518                 mod_timer(&bnad->dim_timer,
1519                           jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
1520         }
1521 }
1522
1523 /*
1524  * Set up timer for statistics
1525  * Called with mutex_lock(&bnad->conf_mutex) held
1526  */
1527 static void
1528 bnad_stats_timer_start(struct bnad *bnad)
1529 {
1530         unsigned long flags;
1531
1532         spin_lock_irqsave(&bnad->bna_lock, flags);
1533         if (!test_and_set_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags)) {
1534                 setup_timer(&bnad->stats_timer, bnad_stats_timeout,
1535                             (unsigned long)bnad);
1536                 mod_timer(&bnad->stats_timer,
1537                           jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
1538         }
1539         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1540 }
1541
1542 /*
1543  * Stops the stats timer
1544  * Called with mutex_lock(&bnad->conf_mutex) held
1545  */
1546 static void
1547 bnad_stats_timer_stop(struct bnad *bnad)
1548 {
1549         int to_del = 0;
1550         unsigned long flags;
1551
1552         spin_lock_irqsave(&bnad->bna_lock, flags);
1553         if (test_and_clear_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1554                 to_del = 1;
1555         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1556         if (to_del)
1557                 del_timer_sync(&bnad->stats_timer);
1558 }
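
/*
 * Note on the stop pattern above: the RUNNING bit is cleared under
 * bna_lock, but del_timer_sync() is deliberately called after the lock
 * is dropped. The timeout handlers take bna_lock themselves, so calling
 * del_timer_sync() while holding it could deadlock on a concurrently
 * running handler.
 */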
1559
1560 /* Utilities */
1561
1562 static void
1563 bnad_netdev_mc_list_get(struct net_device *netdev, u8 *mc_list)
1564 {
1565         int i = 1; /* Index 0 has broadcast address */
1566         struct netdev_hw_addr *mc_addr;
1567
1568         netdev_for_each_mc_addr(mc_addr, netdev) {
1569                 memcpy(&mc_list[i * ETH_ALEN], &mc_addr->addr[0],
1570                                                         ETH_ALEN);
1571                 i++;
1572         }
1573 }
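
/*
 * Caller contract (descriptive note): mc_list must have room for
 * netdev_mc_count(netdev) + 1 entries of ETH_ALEN bytes each, since
 * slot 0 is reserved for the broadcast address; see the allocation in
 * bnad_set_rx_mode() below.
 */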
1574
1575 static int
1576 bnad_napi_poll_rx(struct napi_struct *napi, int budget)
1577 {
1578         struct bnad_rx_ctrl *rx_ctrl =
1579                 container_of(napi, struct bnad_rx_ctrl, napi);
1580         struct bna_ccb *ccb;
1581         struct bnad *bnad;
1582         int rcvd = 0;
1583
1584         ccb = rx_ctrl->ccb;
1585
1586         bnad = ccb->bnad;
1587
1588         if (!netif_carrier_ok(bnad->netdev))
1589                 goto poll_exit;
1590
1591         rcvd = bnad_poll_cq(bnad, ccb, budget);
1592         if (rcvd == budget)
1593                 return rcvd;
1594
1595 poll_exit:
1596         napi_complete(napi);
1597
1598         BNAD_UPDATE_CTR(bnad, netif_rx_complete);
1599
1600         bnad_enable_rx_irq(bnad, ccb);
1601         return rcvd;
1602 }
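
/*
 * NAPI contract (descriptive note): returning the full budget without
 * napi_complete() keeps the poll loop scheduled; anything less
 * completes NAPI and re-enables the Rx interrupt via
 * bnad_enable_rx_irq().
 */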
1603
1604 static void
1605 bnad_napi_enable(struct bnad *bnad, u32 rx_id)
1606 {
1607         struct bnad_rx_ctrl *rx_ctrl;
1608         int i;
1609
1610         /* Initialize & enable NAPI */
1611         for (i = 0; i < bnad->num_rxp_per_rx; i++) {
1612                 rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i];
1613
1614                 netif_napi_add(bnad->netdev, &rx_ctrl->napi,
1615                                bnad_napi_poll_rx, 64);
1616
1617                 napi_enable(&rx_ctrl->napi);
1618         }
1619 }
1620
1621 static void
1622 bnad_napi_disable(struct bnad *bnad, u32 rx_id)
1623 {
1624         int i;
1625
1626         /* First disable and then clean up */
1627         for (i = 0; i < bnad->num_rxp_per_rx; i++) {
1628                 napi_disable(&bnad->rx_info[rx_id].rx_ctrl[i].napi);
1629                 netif_napi_del(&bnad->rx_info[rx_id].rx_ctrl[i].napi);
1630         }
1631 }
1632
1633 /* Should be called with conf_lock held */
1634 void
1635 bnad_cleanup_tx(struct bnad *bnad, uint tx_id)
1636 {
1637         struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
1638         struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
1639         unsigned long flags;
1640
1641         if (!tx_info->tx)
1642                 return;
1643
1644         init_completion(&bnad->bnad_completions.tx_comp);
1645         spin_lock_irqsave(&bnad->bna_lock, flags);
1646         bna_tx_disable(tx_info->tx, BNA_HARD_CLEANUP, bnad_cb_tx_disabled);
1647         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1648         wait_for_completion(&bnad->bnad_completions.tx_comp);
1649
1650         if (tx_info->tcb[0]->intr_type == BNA_INTR_T_MSIX)
1651                 bnad_tx_msix_unregister(bnad, tx_info,
1652                         bnad->num_txq_per_tx);
1653
1654         spin_lock_irqsave(&bnad->bna_lock, flags);
1655         bna_tx_destroy(tx_info->tx);
1656         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1657
1658         tx_info->tx = NULL;
1659
1660         if (tx_id == 0)
1661                 tasklet_kill(&bnad->tx_free_tasklet);
1662
1663         bnad_tx_res_free(bnad, res_info);
1664 }
1665
1666 /* Should be called with conf_lock held */
1667 int
1668 bnad_setup_tx(struct bnad *bnad, uint tx_id)
1669 {
1670         int err;
1671         struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
1672         struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
1673         struct bna_intr_info *intr_info =
1674                         &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
1675         struct bna_tx_config *tx_config = &bnad->tx_config[tx_id];
1676         struct bna_tx_event_cbfn tx_cbfn;
1677         struct bna_tx *tx;
1678         unsigned long flags;
1679
1680         /* Initialize the Tx object configuration */
1681         tx_config->num_txq = bnad->num_txq_per_tx;
1682         tx_config->txq_depth = bnad->txq_depth;
1683         tx_config->tx_type = BNA_TX_T_REGULAR;
1684
1685         /* Initialize the tx event handlers */
1686         tx_cbfn.tcb_setup_cbfn = bnad_cb_tcb_setup;
1687         tx_cbfn.tcb_destroy_cbfn = bnad_cb_tcb_destroy;
1688         tx_cbfn.tx_stall_cbfn = bnad_cb_tx_stall;
1689         tx_cbfn.tx_resume_cbfn = bnad_cb_tx_resume;
1690         tx_cbfn.tx_cleanup_cbfn = bnad_cb_tx_cleanup;
1691
1692         /* Get BNA's resource requirement for one tx object */
1693         spin_lock_irqsave(&bnad->bna_lock, flags);
1694         bna_tx_res_req(bnad->num_txq_per_tx,
1695                 bnad->txq_depth, res_info);
1696         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1697
1698         /* Fill Unmap Q memory requirements */
1699         BNAD_FILL_UNMAPQ_MEM_REQ(
1700                         &res_info[BNA_TX_RES_MEM_T_UNMAPQ],
1701                         bnad->num_txq_per_tx,
1702                         BNAD_TX_UNMAPQ_DEPTH);
1703
1704         /* Allocate resources */
1705         err = bnad_tx_res_alloc(bnad, res_info, tx_id);
1706         if (err)
1707                 return err;
1708
1709         /* Ask BNA to create one Tx object, supplying required resources */
1710         spin_lock_irqsave(&bnad->bna_lock, flags);
1711         tx = bna_tx_create(&bnad->bna, bnad, tx_config, &tx_cbfn, res_info,
1712                         tx_info);
1713         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1714         if (!tx)
1715                 goto err_return;
1716         tx_info->tx = tx;
1717
1718         /* Register ISR for the Tx object */
1719         if (intr_info->intr_type == BNA_INTR_T_MSIX) {
1720                 err = bnad_tx_msix_register(bnad, tx_info,
1721                         tx_id, bnad->num_txq_per_tx);
1722                 if (err)
1723                         goto err_return;
1724         }
1725
1726         spin_lock_irqsave(&bnad->bna_lock, flags);
1727         bna_tx_enable(tx);
1728         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1729
1730         return 0;
1731
1732 err_return:
1733         bnad_tx_res_free(bnad, res_info);
1734         return err;
1735 }
1736
1737 /* Set up the Rx config for bna_rx_create() */
1738 /* bnad decides the configuration */
1739 static void
1740 bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config)
1741 {
1742         rx_config->rx_type = BNA_RX_T_REGULAR;
1743         rx_config->num_paths = bnad->num_rxp_per_rx;
1744
1745         if (bnad->num_rxp_per_rx > 1) {
1746                 rx_config->rss_status = BNA_STATUS_T_ENABLED;
1747                 rx_config->rss_config.hash_type =
1748                                 (BFI_RSS_T_V4_TCP |
1749                                  BFI_RSS_T_V6_TCP |
1750                                  BFI_RSS_T_V4_IP  |
1751                                  BFI_RSS_T_V6_IP);
1752                 rx_config->rss_config.hash_mask =
1753                                 bnad->num_rxp_per_rx - 1;
1754                 get_random_bytes(rx_config->rss_config.toeplitz_hash_key,
1755                         sizeof(rx_config->rss_config.toeplitz_hash_key));
1756         } else {
1757                 rx_config->rss_status = BNA_STATUS_T_DISABLED;
1758                 memset(&rx_config->rss_config, 0,
1759                        sizeof(rx_config->rss_config));
1760         }
1761         rx_config->rxp_type = BNA_RXP_SLR;
1762         rx_config->q_depth = bnad->rxq_depth;
1763
1764         rx_config->small_buff_size = BFI_SMALL_RXBUF_SIZE;
1765
1766         rx_config->vlan_strip_status = BNA_STATUS_T_ENABLED;
1767 }
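
/*
 * RSS spreading note: hash_mask = num_rxp_per_rx - 1 behaves as a true
 * modulo only when the path count is a power of two. For example, with
 * num_rxp_per_rx == 4 the mask is 0x3, so the low two bits of the
 * Toeplitz hash select one of the four Rx paths.
 */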
1768
1769 /* Called with mutex_lock(&bnad->conf_mutex) held */
1770 void
1771 bnad_cleanup_rx(struct bnad *bnad, uint rx_id)
1772 {
1773         struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
1774         struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
1775         struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
1776         unsigned long flags;
1777         int dim_timer_del = 0;
1778
1779         if (!rx_info->rx)
1780                 return;
1781
1782         if (rx_id == 0) {
1783                 spin_lock_irqsave(&bnad->bna_lock, flags);
1784                 dim_timer_del = bnad_dim_timer_running(bnad);
1785                 if (dim_timer_del)
1786                         clear_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
1787                 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1788                 if (dim_timer_del)
1789                         del_timer_sync(&bnad->dim_timer);
1790         }
1791
1792         bnad_napi_disable(bnad, rx_id);
1793
1794         init_completion(&bnad->bnad_completions.rx_comp);
1795         spin_lock_irqsave(&bnad->bna_lock, flags);
1796         bna_rx_disable(rx_info->rx, BNA_HARD_CLEANUP, bnad_cb_rx_disabled);
1797         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1798         wait_for_completion(&bnad->bnad_completions.rx_comp);
1799
1800         if (rx_info->rx_ctrl[0].ccb->intr_type == BNA_INTR_T_MSIX)
1801                 bnad_rx_msix_unregister(bnad, rx_info, rx_config->num_paths);
1802
1803         spin_lock_irqsave(&bnad->bna_lock, flags);
1804         bna_rx_destroy(rx_info->rx);
1805         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1806
1807         rx_info->rx = NULL;
1808
1809         bnad_rx_res_free(bnad, res_info);
1810 }
1811
1812 /* Called with mutex_lock(&bnad->conf_mutex) held */
1813 int
1814 bnad_setup_rx(struct bnad *bnad, uint rx_id)
1815 {
1816         int err;
1817         struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
1818         struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
1819         struct bna_intr_info *intr_info =
1820                         &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
1821         struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
1822         struct bna_rx_event_cbfn rx_cbfn;
1823         struct bna_rx *rx;
1824         unsigned long flags;
1825
1826         /* Initialize the Rx object configuration */
1827         bnad_init_rx_config(bnad, rx_config);
1828
1829         /* Initialize the Rx event handlers */
1830         rx_cbfn.rcb_setup_cbfn = bnad_cb_rcb_setup;
1831         rx_cbfn.rcb_destroy_cbfn = bnad_cb_rcb_destroy;
1832         rx_cbfn.ccb_setup_cbfn = bnad_cb_ccb_setup;
1833         rx_cbfn.ccb_destroy_cbfn = bnad_cb_ccb_destroy;
1834         rx_cbfn.rx_cleanup_cbfn = bnad_cb_rx_cleanup;
1835         rx_cbfn.rx_post_cbfn = bnad_cb_rx_post;
1836
1837         /* Get BNA's resource requirement for one Rx object */
1838         spin_lock_irqsave(&bnad->bna_lock, flags);
1839         bna_rx_res_req(rx_config, res_info);
1840         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1841
1842         /* Fill Unmap Q memory requirements */
1843         BNAD_FILL_UNMAPQ_MEM_REQ(
1844                         &res_info[BNA_RX_RES_MEM_T_UNMAPQ],
1845                         rx_config->num_paths +
1846                         ((rx_config->rxp_type == BNA_RXP_SINGLE) ? 0 :
1847                                 rx_config->num_paths), BNAD_RX_UNMAPQ_DEPTH);
1848
1849         /* Allocate resource */
1850         err = bnad_rx_res_alloc(bnad, res_info, rx_id);
1851         if (err)
1852                 return err;
1853
1854         /* Ask BNA to create one Rx object, supplying required resources */
1855         spin_lock_irqsave(&bnad->bna_lock, flags);
1856         rx = bna_rx_create(&bnad->bna, bnad, rx_config, &rx_cbfn, res_info,
1857                         rx_info);
1858         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1859         if (!rx)
1860                 goto err_return;
1861         rx_info->rx = rx;
1862
1863         /* Register ISR for the Rx object */
1864         if (intr_info->intr_type == BNA_INTR_T_MSIX) {
1865                 err = bnad_rx_msix_register(bnad, rx_info, rx_id,
1866                                                 rx_config->num_paths);
1867                 if (err)
1868                         goto err_return;
1869         }
1870
1871         /* Enable NAPI */
1872         bnad_napi_enable(bnad, rx_id);
1873
1874         spin_lock_irqsave(&bnad->bna_lock, flags);
1875         if (rx_id == 0) {
1876                 /* Set up Dynamic Interrupt Moderation Vector */
1877                 if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED)
1878                         bna_rx_dim_reconfig(&bnad->bna, bna_napi_dim_vector);
1879
1880                 /* Enable VLAN filtering only on the default Rx */
1881                 bna_rx_vlanfilter_enable(rx);
1882
1883                 /* Start the DIM timer */
1884                 bnad_dim_timer_start(bnad);
1885         }
1886
1887         bna_rx_enable(rx);
1888         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1889
1890         return 0;
1891
1892 err_return:
1893         bnad_cleanup_rx(bnad, rx_id);
1894         return err;
1895 }
1896
1897 /* Called with conf_lock & bnad->bna_lock held */
1898 void
1899 bnad_tx_coalescing_timeo_set(struct bnad *bnad)
1900 {
1901         struct bnad_tx_info *tx_info;
1902
1903         tx_info = &bnad->tx_info[0];
1904         if (!tx_info->tx)
1905                 return;
1906
1907         bna_tx_coalescing_timeo_set(tx_info->tx, bnad->tx_coalescing_timeo);
1908 }
1909
1910 /* Called with conf_lock & bnad->bna_lock held */
1911 void
1912 bnad_rx_coalescing_timeo_set(struct bnad *bnad)
1913 {
1914         struct bnad_rx_info *rx_info;
1915         int     i;
1916
1917         for (i = 0; i < bnad->num_rx; i++) {
1918                 rx_info = &bnad->rx_info[i];
1919                 if (!rx_info->rx)
1920                         continue;
1921                 bna_rx_coalescing_timeo_set(rx_info->rx,
1922                                 bnad->rx_coalescing_timeo);
1923         }
1924 }
1925
1926 /*
1927  * Called with bnad->bna_lock held
1928  */
1929 static int
1930 bnad_mac_addr_set_locked(struct bnad *bnad, u8 *mac_addr)
1931 {
1932         int ret;
1933
1934         if (!is_valid_ether_addr(mac_addr))
1935                 return -EADDRNOTAVAIL;
1936
1937         /* If datapath is down, pretend everything went through */
1938         if (!bnad->rx_info[0].rx)
1939                 return 0;
1940
1941         ret = bna_rx_ucast_set(bnad->rx_info[0].rx, mac_addr, NULL);
1942         if (ret != BNA_CB_SUCCESS)
1943                 return -EADDRNOTAVAIL;
1944
1945         return 0;
1946 }
1947
1948 /* Should be called with conf_lock held */
1949 static int
1950 bnad_enable_default_bcast(struct bnad *bnad)
1951 {
1952         struct bnad_rx_info *rx_info = &bnad->rx_info[0];
1953         int ret;
1954         unsigned long flags;
1955
1956         init_completion(&bnad->bnad_completions.mcast_comp);
1957
1958         spin_lock_irqsave(&bnad->bna_lock, flags);
1959         ret = bna_rx_mcast_add(rx_info->rx, (u8 *)bnad_bcast_addr,
1960                                 bnad_cb_rx_mcast_add);
1961         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1962
1963         if (ret == BNA_CB_SUCCESS)
1964                 wait_for_completion(&bnad->bnad_completions.mcast_comp);
1965         else
1966                 return -ENODEV;
1967
1968         if (bnad->bnad_completions.mcast_comp_status != BNA_CB_SUCCESS)
1969                 return -ENODEV;
1970
1971         return 0;
1972 }
1973
1974 /* Called with bnad_conf_lock() held */
1975 static void
1976 bnad_restore_vlans(struct bnad *bnad, u32 rx_id)
1977 {
1978         u16 vid;
1979         unsigned long flags;
1980
1981         BUG_ON(!(VLAN_N_VID == (BFI_MAX_VLAN + 1)));
1982
1983         for_each_set_bit(vid, bnad->active_vlans, VLAN_N_VID) {
1984                 spin_lock_irqsave(&bnad->bna_lock, flags);
1985                 bna_rx_vlan_add(bnad->rx_info[rx_id].rx, vid);
1986                 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1987         }
1988 }
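
/*
 * Note: bna_lock is taken and released per VLAN id rather than around
 * the whole sweep, presumably to bound the IRQ-disabled window when
 * many of the up to VLAN_N_VID ids are active.
 */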
1989
1990 /* Statistics utilities */
1991 void
1992 bnad_netdev_qstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
1993 {
1994         int i, j;
1995
1996         for (i = 0; i < bnad->num_rx; i++) {
1997                 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
1998                         if (bnad->rx_info[i].rx_ctrl[j].ccb) {
1999                                 stats->rx_packets += bnad->rx_info[i].
2000                                 rx_ctrl[j].ccb->rcb[0]->rxq->rx_packets;
2001                                 stats->rx_bytes += bnad->rx_info[i].
2002                                         rx_ctrl[j].ccb->rcb[0]->rxq->rx_bytes;
2003                                 if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] &&
2004                                         bnad->rx_info[i].rx_ctrl[j].ccb->
2005                                         rcb[1]->rxq) {
2006                                         stats->rx_packets +=
2007                                                 bnad->rx_info[i].rx_ctrl[j].
2008                                                 ccb->rcb[1]->rxq->rx_packets;
2009                                         stats->rx_bytes +=
2010                                                 bnad->rx_info[i].rx_ctrl[j].
2011                                                 ccb->rcb[1]->rxq->rx_bytes;
2012                                 }
2013                         }
2014                 }
2015         }
2016         for (i = 0; i < bnad->num_tx; i++) {
2017                 for (j = 0; j < bnad->num_txq_per_tx; j++) {
2018                         if (bnad->tx_info[i].tcb[j]) {
2019                                 stats->tx_packets +=
2020                                 bnad->tx_info[i].tcb[j]->txq->tx_packets;
2021                                 stats->tx_bytes +=
2022                                         bnad->tx_info[i].tcb[j]->txq->tx_bytes;
2023                         }
2024                 }
2025         }
2026 }
2027
2028 /*
2029  * Must be called with the bna_lock held.
2030  */
2031 void
2032 bnad_netdev_hwstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
2033 {
2034         struct bfi_ll_stats_mac *mac_stats;
2035         u64 bmap;
2036         int i;
2037
2038         mac_stats = &bnad->stats.bna_stats->hw_stats->mac_stats;
2039         stats->rx_errors =
2040                 mac_stats->rx_fcs_error + mac_stats->rx_alignment_error +
2041                 mac_stats->rx_frame_length_error + mac_stats->rx_code_error +
2042                 mac_stats->rx_undersize;
2043         stats->tx_errors = mac_stats->tx_fcs_error +
2044                                         mac_stats->tx_undersize;
2045         stats->rx_dropped = mac_stats->rx_drop;
2046         stats->tx_dropped = mac_stats->tx_drop;
2047         stats->multicast = mac_stats->rx_multicast;
2048         stats->collisions = mac_stats->tx_total_collision;
2049
2050         stats->rx_length_errors = mac_stats->rx_frame_length_error;
2051
2052         /* no MAC counter maps to rx_over_errors (receive ring overflow) */
2053
2054         stats->rx_crc_errors = mac_stats->rx_fcs_error;
2055         stats->rx_frame_errors = mac_stats->rx_alignment_error;
2056         /* receiver FIFO overrun */
2057         bmap = (u64)bnad->stats.bna_stats->rxf_bmap[0] |
2058                 ((u64)bnad->stats.bna_stats->rxf_bmap[1] << 32);
2059         for (i = 0; bmap && (i < BFI_LL_RXF_ID_MAX); i++) {
2060                 if (bmap & 1) {
2061                         stats->rx_fifo_errors +=
2062                                 bnad->stats.bna_stats->
2063                                         hw_stats->rxf_stats[i].frame_drops;
2064                         break;
2065                 }
2066                 bmap >>= 1;
2067         }
2068 }
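
/*
 * Note: the bitmap walk above stops at the first set bit, so
 * rx_fifo_errors accumulates frame_drops from only the first active
 * RxF rather than summing across all active functions.
 */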
2069
2070 static void
2071 bnad_mbox_irq_sync(struct bnad *bnad)
2072 {
2073         u32 irq;
2074         unsigned long flags;
2075
2076         spin_lock_irqsave(&bnad->bna_lock, flags);
2077         if (bnad->cfg_flags & BNAD_CF_MSIX)
2078                 irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector;
2079         else
2080                 irq = bnad->pcidev->irq;
2081         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2082
2083         synchronize_irq(irq);
2084 }
2085
2086 /* Utility used by bnad_start_xmit, for doing TSO */
2087 static int
2088 bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb)
2089 {
2090         int err;
2091
2092         /* SKB_GSO_TCPV4 and SKB_GSO_TCPV6 are defined since 2.6.18. */
2093         BUG_ON(!(skb_shinfo(skb)->gso_type == SKB_GSO_TCPV4 ||
2094                    skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6));
2095         if (skb_header_cloned(skb)) {
2096                 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2097                 if (err) {
2098                         BNAD_UPDATE_CTR(bnad, tso_err);
2099                         return err;
2100                 }
2101         }
2102
2103         /*
2104          * For TSO, the TCP checksum field is seeded with pseudo-header sum
2105          * excluding the length field.
2106          */
2107         if (skb->protocol == htons(ETH_P_IP)) {
2108                 struct iphdr *iph = ip_hdr(skb);
2109
2110                 /* Zero these out; the hardware recomputes them per segment */
2111                 iph->tot_len = 0;
2112                 iph->check = 0;
2113
2114                 tcp_hdr(skb)->check =
2115                         ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
2116                                            IPPROTO_TCP, 0);
2117                 BNAD_UPDATE_CTR(bnad, tso4);
2118         } else {
2119                 struct ipv6hdr *ipv6h = ipv6_hdr(skb);
2120
2121                 BUG_ON(!(skb->protocol == htons(ETH_P_IPV6)));
2122                 ipv6h->payload_len = 0;
2123                 tcp_hdr(skb)->check =
2124                         ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, 0,
2125                                          IPPROTO_TCP, 0);
2126                 BNAD_UPDATE_CTR(bnad, tso6);
2127         }
2128
2129         return 0;
2130 }
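
/*
 * Descriptive note on the seeding above, following the usual TSO
 * convention: the TCP checksum field is pre-loaded with the
 * pseudo-header sum computed over a zero length (with iph->tot_len or
 * ipv6h->payload_len cleared), leaving the device to fold in the real
 * length and payload checksum for each generated segment.
 */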
2131
2132 /*
2133  * Initialize Q numbers depending on Rx Paths
2134  * Called with bnad->bna_lock held, because of cfg_flags
2135  * access.
2136  */
2137 static void
2138 bnad_q_num_init(struct bnad *bnad)
2139 {
2140         int rxps;
2141
2142         rxps = min((uint)num_online_cpus(),
2143                         (uint)(BNAD_MAX_RXS * BNAD_MAX_RXPS_PER_RX));
2144
2145         if (!(bnad->cfg_flags & BNAD_CF_MSIX))
2146                 rxps = 1;       /* INTx */
2147
2148         bnad->num_rx = 1;
2149         bnad->num_tx = 1;
2150         bnad->num_rxp_per_rx = rxps;
2151         bnad->num_txq_per_tx = BNAD_TXQ_NUM;
2152 }
2153
2154 /*
2155  * Adjusts the Q numbers, given the number of MSI-X vectors.
2156  * Preference is given to RSS over Tx priority queues; in that
2157  * case just one Tx queue is used.
2158  * Called with bnad->bna_lock held because of cfg_flags access.
2159  */
2160 static void
2161 bnad_q_num_adjust(struct bnad *bnad, int msix_vectors)
2162 {
2163         bnad->num_txq_per_tx = 1;
2164         if ((msix_vectors >= (bnad->num_tx * bnad->num_txq_per_tx)  +
2165              bnad_rxqs_per_cq + BNAD_MAILBOX_MSIX_VECTORS) &&
2166             (bnad->cfg_flags & BNAD_CF_MSIX)) {
2167                 bnad->num_rxp_per_rx = msix_vectors -
2168                         (bnad->num_tx * bnad->num_txq_per_tx) -
2169                         BNAD_MAILBOX_MSIX_VECTORS;
2170         } else
2171                 bnad->num_rxp_per_rx = 1;
2172 }
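
/*
 * Worked example (illustrative; assumes BNAD_MAILBOX_MSIX_VECTORS == 1):
 * with one Tx object, one TxQ and 8 granted vectors, the test against
 * 1 + bnad_rxqs_per_cq + 1 = 4 passes and num_rxp_per_rx becomes
 * 8 - 1 - 1 = 6; with fewer than 4 granted vectors the driver falls
 * back to a single Rx path.
 */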
2173
2174 /* Enable / disable device */
2175 static void
2176 bnad_device_disable(struct bnad *bnad)
2177 {
2178         unsigned long flags;
2179
2180         init_completion(&bnad->bnad_completions.ioc_comp);
2181
2182         spin_lock_irqsave(&bnad->bna_lock, flags);
2183         bna_device_disable(&bnad->bna.device, BNA_HARD_CLEANUP);
2184         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2185
2186         wait_for_completion(&bnad->bnad_completions.ioc_comp);
2187 }
2188
2189 static int
2190 bnad_device_enable(struct bnad *bnad)
2191 {
2192         int err = 0;
2193         unsigned long flags;
2194
2195         init_completion(&bnad->bnad_completions.ioc_comp);
2196
2197         spin_lock_irqsave(&bnad->bna_lock, flags);
2198         bna_device_enable(&bnad->bna.device);
2199         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2200
2201         wait_for_completion(&bnad->bnad_completions.ioc_comp);
2202
2203         if (bnad->bnad_completions.ioc_comp_status)
2204                 err = bnad->bnad_completions.ioc_comp_status;
2205
2206         return err;
2207 }
2208
2209 /* Free BNA resources */
2210 static void
2211 bnad_res_free(struct bnad *bnad)
2212 {
2213         int i;
2214         struct bna_res_info *res_info = &bnad->res_info[0];
2215
2216         for (i = 0; i < BNA_RES_T_MAX; i++) {
2217                 if (res_info[i].res_type == BNA_RES_T_MEM)
2218                         bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
2219                 else
2220                         bnad_mbox_irq_free(bnad, &res_info[i].res_u.intr_info);
2221         }
2222 }
2223
2224 /* Allocates memory and interrupt resources for BNA */
2225 static int
2226 bnad_res_alloc(struct bnad *bnad)
2227 {
2228         int i, err;
2229         struct bna_res_info *res_info = &bnad->res_info[0];
2230
2231         for (i = 0; i < BNA_RES_T_MAX; i++) {
2232                 if (res_info[i].res_type == BNA_RES_T_MEM)
2233                         err = bnad_mem_alloc(bnad, &res_info[i].res_u.mem_info);
2234                 else
2235                         err = bnad_mbox_irq_alloc(bnad,
2236                                                   &res_info[i].res_u.intr_info);
2237                 if (err)
2238                         goto err_return;
2239         }
2240         return 0;
2241
2242 err_return:
2243         bnad_res_free(bnad);
2244         return err;
2245 }
2246
2247 /* Interrupt enable / disable */
2248 static void
2249 bnad_enable_msix(struct bnad *bnad)
2250 {
2251         int i, ret;
2252         unsigned long flags;
2253
2254         spin_lock_irqsave(&bnad->bna_lock, flags);
2255         if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
2256                 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2257                 return;
2258         }
2259         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2260
2261         if (bnad->msix_table)
2262                 return;
2263
2264         bnad->msix_table =
2265                 kcalloc(bnad->msix_num, sizeof(struct msix_entry), GFP_KERNEL);
2266
2267         if (!bnad->msix_table)
2268                 goto intx_mode;
2269
2270         for (i = 0; i < bnad->msix_num; i++)
2271                 bnad->msix_table[i].entry = i;
2272
2273         ret = pci_enable_msix(bnad->pcidev, bnad->msix_table, bnad->msix_num);
2274         if (ret > 0) {
2275                 /* Not enough MSI-X vectors. */
2276
2277                 spin_lock_irqsave(&bnad->bna_lock, flags);
2278                 /* ret = #of vectors that we got */
2279                 bnad_q_num_adjust(bnad, ret);
2280                 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2281
2282                 bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx)
2283                         + (bnad->num_rx
2284                         * bnad->num_rxp_per_rx) +
2285                          BNAD_MAILBOX_MSIX_VECTORS;
2286
2287                 /* Try once more with adjusted numbers */
2288                 /* If this fails, fall back to INTx */
2289                 ret = pci_enable_msix(bnad->pcidev, bnad->msix_table,
2290                                       bnad->msix_num);
2291                 if (ret)
2292                         goto intx_mode;
2293
2294         } else if (ret < 0)
2295                 goto intx_mode;
2296         return;
2297
2298 intx_mode:
2299
2300         kfree(bnad->msix_table);
2301         bnad->msix_table = NULL;
2302         bnad->msix_num = 0;
2303         spin_lock_irqsave(&bnad->bna_lock, flags);
2304         bnad->cfg_flags &= ~BNAD_CF_MSIX;
2305         bnad_q_num_init(bnad);
2306         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2307 }
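
/*
 * Summary of the MSI-X setup above: request the full vector count; on a
 * partial grant, shrink the queue layout via bnad_q_num_adjust() and
 * retry once; on any further failure, free the table and drop back to
 * INTx mode with single-path defaults via bnad_q_num_init().
 */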
2308
2309 static void
2310 bnad_disable_msix(struct bnad *bnad)
2311 {
2312         u32 cfg_flags;
2313         unsigned long flags;
2314
2315         spin_lock_irqsave(&bnad->bna_lock, flags);
2316         cfg_flags = bnad->cfg_flags;
2317         if (bnad->cfg_flags & BNAD_CF_MSIX)
2318                 bnad->cfg_flags &= ~BNAD_CF_MSIX;
2319         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2320
2321         if (cfg_flags & BNAD_CF_MSIX) {
2322                 pci_disable_msix(bnad->pcidev);
2323                 kfree(bnad->msix_table);
2324                 bnad->msix_table = NULL;
2325         }
2326 }
2327
2328 /* Netdev entry points */
2329 static int
2330 bnad_open(struct net_device *netdev)
2331 {
2332         int err;
2333         struct bnad *bnad = netdev_priv(netdev);
2334         struct bna_pause_config pause_config;
2335         int mtu;
2336         unsigned long flags;
2337
2338         mutex_lock(&bnad->conf_mutex);
2339
2340         /* Tx */
2341         err = bnad_setup_tx(bnad, 0);
2342         if (err)
2343                 goto err_return;
2344
2345         /* Rx */
2346         err = bnad_setup_rx(bnad, 0);
2347         if (err)
2348                 goto cleanup_tx;
2349
2350         /* Port */
2351         pause_config.tx_pause = 0;
2352         pause_config.rx_pause = 0;
2353
2354         mtu = ETH_HLEN + bnad->netdev->mtu + ETH_FCS_LEN;
2355
2356         spin_lock_irqsave(&bnad->bna_lock, flags);
2357         bna_port_mtu_set(&bnad->bna.port, mtu, NULL);
2358         bna_port_pause_config(&bnad->bna.port, &pause_config, NULL);
2359         bna_port_enable(&bnad->bna.port);
2360         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2361
2362         /* Enable broadcast */
2363         bnad_enable_default_bcast(bnad);
2364
2365         /* Restore VLANs, if any */
2366         bnad_restore_vlans(bnad, 0);
2367
2368         /* Set the UCAST address */
2369         spin_lock_irqsave(&bnad->bna_lock, flags);
2370         bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
2371         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2372
2373         /* Start the stats timer */
2374         bnad_stats_timer_start(bnad);
2375
2376         mutex_unlock(&bnad->conf_mutex);
2377
2378         return 0;
2379
2380 cleanup_tx:
2381         bnad_cleanup_tx(bnad, 0);
2382
2383 err_return:
2384         mutex_unlock(&bnad->conf_mutex);
2385         return err;
2386 }
2387
2388 static int
2389 bnad_stop(struct net_device *netdev)
2390 {
2391         struct bnad *bnad = netdev_priv(netdev);
2392         unsigned long flags;
2393
2394         mutex_lock(&bnad->conf_mutex);
2395
2396         /* Stop the stats timer */
2397         bnad_stats_timer_stop(bnad);
2398
2399         init_completion(&bnad->bnad_completions.port_comp);
2400
2401         spin_lock_irqsave(&bnad->bna_lock, flags);
2402         bna_port_disable(&bnad->bna.port, BNA_HARD_CLEANUP,
2403                         bnad_cb_port_disabled);
2404         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2405
2406         wait_for_completion(&bnad->bnad_completions.port_comp);
2407
2408         bnad_cleanup_tx(bnad, 0);
2409         bnad_cleanup_rx(bnad, 0);
2410
2411         /* Synchronize mailbox IRQ */
2412         bnad_mbox_irq_sync(bnad);
2413
2414         mutex_unlock(&bnad->conf_mutex);
2415
2416         return 0;
2417 }
2418
2419 /* TX */
2420 /*
2421  * bnad_start_xmit : Netdev entry point for Transmit
2422  *                   Called under lock held by net_device
2423  */
2424 static netdev_tx_t
2425 bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
2426 {
2427         struct bnad *bnad = netdev_priv(netdev);
2428
2429         u16             txq_prod, vlan_tag = 0;
2430         u32             unmap_prod, wis, wis_used, wi_range;
2431         u32             vectors, vect_id, i, acked;
2432         u32             tx_id;
2433         int                     err;
2434
2435         struct bnad_tx_info *tx_info;
2436         struct bna_tcb *tcb;
2437         struct bnad_unmap_q *unmap_q;
2438         dma_addr_t              dma_addr;
2439         struct bna_txq_entry *txqent;
2440         bna_txq_wi_ctrl_flag_t  flags;
2441
2442         if (unlikely(skb->len <= ETH_HLEN ||
2443                      skb->len > BFI_TX_MAX_DATA_PER_PKT)) {
2444                 dev_kfree_skb(skb);
2445                 return NETDEV_TX_OK;
2446         }
2447
2448         tx_id = 0;
2449
2450         tx_info = &bnad->tx_info[tx_id];
2451         tcb = tx_info->tcb[tx_id];
2452         unmap_q = tcb->unmap_q;
2453
2454         /*
2455          * Takes care of the Tx that is scheduled between clearing the flag
2456          * and the netif_stop_queue() call.
2457          */
2458         if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) {
2459                 dev_kfree_skb(skb);
2460                 return NETDEV_TX_OK;
2461         }
2462
2463         vectors = 1 + skb_shinfo(skb)->nr_frags;
2464         if (vectors > BFI_TX_MAX_VECTORS_PER_PKT) {
2465                 dev_kfree_skb(skb);
2466                 return NETDEV_TX_OK;
2467         }
2468         wis = BNA_TXQ_WI_NEEDED(vectors);       /* 4 vectors per work item */
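        /*
         * e.g. a paged skb with 6 fragments needs 7 vectors (header +
         * 6 frags), i.e. 2 work items at 4 vectors per WI; the second
         * WI is emitted as a BNA_TXQ_WI_EXTENSION entry further below.
         */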
2469         acked = 0;
2470         if (unlikely(wis > BNA_QE_FREE_CNT(tcb, tcb->q_depth) ||
2471                      vectors > BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth))) {
2473                 if ((u16) (*tcb->hw_consumer_index) !=
2474                     tcb->consumer_index &&
2475                     !test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
2476                         acked = bnad_free_txbufs(bnad, tcb);
2477                         if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
2478                                 bna_ib_ack(tcb->i_dbell, acked);
2479                         smp_mb__before_clear_bit();
2480                         clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
2481                 } else {
2482                         netif_stop_queue(netdev);
2483                         BNAD_UPDATE_CTR(bnad, netif_queue_stop);
2484                 }
2485
2486                 smp_mb();
2487                 /*
2488                  * Check again to deal with race condition between
2489                  * netif_stop_queue here, and netif_wake_queue in
2490                  * interrupt handler which is not inside netif tx lock.
2491                  */
2492                 if (likely(wis > BNA_QE_FREE_CNT(tcb, tcb->q_depth) ||
2493                            vectors > BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth))) {
2495                         BNAD_UPDATE_CTR(bnad, netif_queue_stop);
2496                         return NETDEV_TX_BUSY;
2497                 } else {
2498                         netif_wake_queue(netdev);
2499                         BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
2500                 }
2501         }
2502
2503         unmap_prod = unmap_q->producer_index;
2504         wis_used = 1;
2505         vect_id = 0;
2506         flags = 0;
2507
2508         txq_prod = tcb->producer_index;
2509         BNA_TXQ_QPGE_PTR_GET(txq_prod, tcb->sw_qpt, txqent, wi_range);
2510         BUG_ON(!(wi_range <= tcb->q_depth));
2511         txqent->hdr.wi.reserved = 0;
2512         txqent->hdr.wi.num_vectors = vectors;
2513         txqent->hdr.wi.opcode =
2514                 htons((skb_is_gso(skb) ? BNA_TXQ_WI_SEND_LSO :
2515                        BNA_TXQ_WI_SEND));
2516
2517         if (vlan_tx_tag_present(skb)) {
2518                 vlan_tag = (u16) vlan_tx_tag_get(skb);
2519                 flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
2520         }
2521         if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags)) {
2522                 vlan_tag =
2523                         (tcb->priority & 0x7) << 13 | (vlan_tag & 0x1fff);
2524                 flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
2525         }
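        /*
         * 802.1Q tag layout note: the 3-bit priority lands in bits
         * 15:13 and the lower 13 bits of the tag are kept. For example,
         * tcb->priority == 5 with vlan_tag == 100 packs to
         * (5 << 13) | 100 == 0xa064, byte-swapped by htons() below.
         */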
2526
2527         txqent->hdr.wi.vlan_tag = htons(vlan_tag);
2528
2529         if (skb_is_gso(skb)) {
2530                 err = bnad_tso_prepare(bnad, skb);
2531                 if (err) {
2532                         dev_kfree_skb(skb);
2533                         return NETDEV_TX_OK;
2534                 }
2535                 txqent->hdr.wi.lso_mss = htons(skb_is_gso(skb));
2536                 flags |= (BNA_TXQ_WI_CF_IP_CKSUM | BNA_TXQ_WI_CF_TCP_CKSUM);
2537                 txqent->hdr.wi.l4_hdr_size_n_offset =
2538                         htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2539                               (tcp_hdrlen(skb) >> 2,
2540                                skb_transport_offset(skb)));
2541         } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
2542                 u8 proto = 0;
2543
2544                 txqent->hdr.wi.lso_mss = 0;
2545
2546                 if (skb->protocol == htons(ETH_P_IP))
2547                         proto = ip_hdr(skb)->protocol;
2548                 else if (skb->protocol == htons(ETH_P_IPV6)) {
2549                         /* nexthdr may not be TCP right away; extension headers can intervene. */
2550                         proto = ipv6_hdr(skb)->nexthdr;
2551                 }
2552                 if (proto == IPPROTO_TCP) {
2553                         flags |= BNA_TXQ_WI_CF_TCP_CKSUM;
2554                         txqent->hdr.wi.l4_hdr_size_n_offset =
2555                                 htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2556                                       (0, skb_transport_offset(skb)));
2557
2558                         BNAD_UPDATE_CTR(bnad, tcpcsum_offload);
2559
2560                         BUG_ON(!(skb_headlen(skb) >=
2561                                 skb_transport_offset(skb) + tcp_hdrlen(skb)));
2562
2563                 } else if (proto == IPPROTO_UDP) {
2564                         flags |= BNA_TXQ_WI_CF_UDP_CKSUM;
2565                         txqent->hdr.wi.l4_hdr_size_n_offset =
2566                                 htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2567                                       (0, skb_transport_offset(skb)));
2568
2569                         BNAD_UPDATE_CTR(bnad, udpcsum_offload);
2570
2571                         BUG_ON(!(skb_headlen(skb) >=
2572                                    skb_transport_offset(skb) +
2573                                    sizeof(struct udphdr)));
2574                 } else {
2575                         err = skb_checksum_help(skb);
2576                         BNAD_UPDATE_CTR(bnad, csum_help);
2577                         if (err) {
2578                                 dev_kfree_skb(skb);
2579                                 BNAD_UPDATE_CTR(bnad, csum_help_err);
2580                                 return NETDEV_TX_OK;
2581                         }
2582                 }
2583         } else {
2584                 txqent->hdr.wi.lso_mss = 0;
2585                 txqent->hdr.wi.l4_hdr_size_n_offset = 0;
2586         }
2587
2588         txqent->hdr.wi.flags = htons(flags);
2589
2590         txqent->hdr.wi.frame_length = htonl(skb->len);
2591
2592         unmap_q->unmap_array[unmap_prod].skb = skb;
2593         BUG_ON(!(skb_headlen(skb) <= BFI_TX_MAX_DATA_PER_VECTOR));
2594         txqent->vector[vect_id].length = htons(skb_headlen(skb));
2595         dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
2596                                   skb_headlen(skb), DMA_TO_DEVICE);
2597         dma_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
2598                            dma_addr);
2599
2600         BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
2601         BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
2602
2603         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2604                 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
2605                 u32             size = frag->size;
2606
2607                 if (++vect_id == BFI_TX_MAX_VECTORS_PER_WI) {
2608                         vect_id = 0;
2609                         if (--wi_range)
2610                                 txqent++;
2611                         else {
2612                                 BNA_QE_INDX_ADD(txq_prod, wis_used,
2613                                                 tcb->q_depth);
2614                                 wis_used = 0;
2615                                 BNA_TXQ_QPGE_PTR_GET(txq_prod, tcb->sw_qpt,
2616                                                      txqent, wi_range);
2617                                 BUG_ON(!(wi_range <= tcb->q_depth));
2618                         }
2619                         wis_used++;
2620                         txqent->hdr.wi_ext.opcode = htons(BNA_TXQ_WI_EXTENSION);
2621                 }
2622
2623                 BUG_ON(!(size <= BFI_TX_MAX_DATA_PER_VECTOR));
2624                 txqent->vector[vect_id].length = htons(size);
2625                 dma_addr = dma_map_page(&bnad->pcidev->dev, frag->page,
2626                                         frag->page_offset, size, DMA_TO_DEVICE);
2627                 dma_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
2628                                    dma_addr);
2629                 BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
2630                 BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
2631         }
2632
2633         unmap_q->producer_index = unmap_prod;
2634         BNA_QE_INDX_ADD(txq_prod, wis_used, tcb->q_depth);
2635         tcb->producer_index = txq_prod;
2636
2637         smp_mb();
2638
2639         if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
2640                 return NETDEV_TX_OK;
2641
2642         bna_txq_prod_indx_doorbell(tcb);
2643
2644         if ((u16) (*tcb->hw_consumer_index) != tcb->consumer_index)
2645                 tasklet_schedule(&bnad->tx_free_tasklet);
2646
2647         return NETDEV_TX_OK;
2648 }
2649
2650 /*
2651  * Uses the spin lock to synchronize reads of the stats structures,
2652  * which are written by BNA under the same lock.
2653  */
2654 static struct rtnl_link_stats64 *
2655 bnad_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
2656 {
2657         struct bnad *bnad = netdev_priv(netdev);
2658         unsigned long flags;
2659
2660         spin_lock_irqsave(&bnad->bna_lock, flags);
2661
2662         bnad_netdev_qstats_fill(bnad, stats);
2663         bnad_netdev_hwstats_fill(bnad, stats);
2664
2665         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2666
2667         return stats;
2668 }
2669
2670 static void
2671 bnad_set_rx_mode(struct net_device *netdev)
2672 {
2673         struct bnad *bnad = netdev_priv(netdev);
2674         u32     new_mask, valid_mask;
2675         unsigned long flags;
2676
2677         spin_lock_irqsave(&bnad->bna_lock, flags);
2678
2679         new_mask = valid_mask = 0;
2680
2681         if (netdev->flags & IFF_PROMISC) {
2682                 if (!(bnad->cfg_flags & BNAD_CF_PROMISC)) {
2683                         new_mask = BNAD_RXMODE_PROMISC_DEFAULT;
2684                         valid_mask = BNAD_RXMODE_PROMISC_DEFAULT;
2685                         bnad->cfg_flags |= BNAD_CF_PROMISC;
2686                 }
2687         } else {
2688                 if (bnad->cfg_flags & BNAD_CF_PROMISC) {
2689                         new_mask = ~BNAD_RXMODE_PROMISC_DEFAULT;
2690                         valid_mask = BNAD_RXMODE_PROMISC_DEFAULT;
2691                         bnad->cfg_flags &= ~BNAD_CF_PROMISC;
2692                 }
2693         }
2694
2695         if (netdev->flags & IFF_ALLMULTI) {
2696                 if (!(bnad->cfg_flags & BNAD_CF_ALLMULTI)) {
2697                         new_mask |= BNA_RXMODE_ALLMULTI;
2698                         valid_mask |= BNA_RXMODE_ALLMULTI;
2699                         bnad->cfg_flags |= BNAD_CF_ALLMULTI;
2700                 }
2701         } else {
2702                 if (bnad->cfg_flags & BNAD_CF_ALLMULTI) {
2703                         new_mask &= ~BNA_RXMODE_ALLMULTI;
2704                         valid_mask |= BNA_RXMODE_ALLMULTI;
2705                         bnad->cfg_flags &= ~BNAD_CF_ALLMULTI;
2706                 }
2707         }
2708
2709         bna_rx_mode_set(bnad->rx_info[0].rx, new_mask, valid_mask, NULL);
2710
2711         if (!netdev_mc_empty(netdev)) {
2712                 u8 *mcaddr_list;
2713                 int mc_count = netdev_mc_count(netdev);
2714
2715                 /* Index 0 holds the broadcast address */
2716                 mcaddr_list =
2717                         kzalloc((mc_count + 1) * ETH_ALEN,
2718                                 GFP_ATOMIC);
2719                 if (!mcaddr_list)
2720                         goto unlock;
2721
2722                 memcpy(&mcaddr_list[0], &bnad_bcast_addr[0], ETH_ALEN);
2723
2724                 /* Copy rest of the MC addresses */
2725                 bnad_netdev_mc_list_get(netdev, mcaddr_list);
2726
2727                 bna_rx_mcast_listset(bnad->rx_info[0].rx, mc_count + 1,
2728                                         mcaddr_list, NULL);
2729
2730                 /* Should we enable BNAD_CF_ALLMULTI for err != 0 ? */
2731                 kfree(mcaddr_list);
2732         }
2733 unlock:
2734         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2735 }
2736
2737 /*
2738  * bna_lock is used to sync writes to netdev->addr
2739  * conf_lock cannot be used since this call may be made
2740  * in a non-blocking context.
2741  */
2742 static int
2743 bnad_set_mac_address(struct net_device *netdev, void *mac_addr)
2744 {
2745         int err;
2746         struct bnad *bnad = netdev_priv(netdev);
2747         struct sockaddr *sa = (struct sockaddr *)mac_addr;
2748         unsigned long flags;
2749
2750         spin_lock_irqsave(&bnad->bna_lock, flags);
2751
2752         err = bnad_mac_addr_set_locked(bnad, sa->sa_data);
2753
2754         if (!err)
2755                 memcpy(netdev->dev_addr, sa->sa_data, netdev->addr_len);
2756
2757         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2758
2759         return err;
2760 }
2761
2762 static int
2763 bnad_change_mtu(struct net_device *netdev, int new_mtu)
2764 {
2765         int mtu, err = 0;
2766         unsigned long flags;
2767
2768         struct bnad *bnad = netdev_priv(netdev);
2769
2770         if (new_mtu + ETH_HLEN < ETH_ZLEN || new_mtu > BNAD_JUMBO_MTU)
2771                 return -EINVAL;
2772
2773         mutex_lock(&bnad->conf_mutex);
2774
2775         netdev->mtu = new_mtu;
2776
2777         mtu = ETH_HLEN + new_mtu + ETH_FCS_LEN;
2778
2779         spin_lock_irqsave(&bnad->bna_lock, flags);
2780         bna_port_mtu_set(&bnad->bna.port, mtu, NULL);
2781         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2782
2783         mutex_unlock(&bnad->conf_mutex);
2784         return err;
2785 }
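
/*
 * On-wire frame size example for the MTU math above: new_mtu == 1500
 * yields ETH_HLEN (14) + 1500 + ETH_FCS_LEN (4) = 1518 bytes passed to
 * bna_port_mtu_set().
 */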
2786
2787 static void
2788 bnad_vlan_rx_add_vid(struct net_device *netdev,
2789                                  unsigned short vid)
2790 {
2791         struct bnad *bnad = netdev_priv(netdev);
2792         unsigned long flags;
2793
2794         if (!bnad->rx_info[0].rx)
2795                 return;
2796
2797         mutex_lock(&bnad->conf_mutex);
2798
2799         spin_lock_irqsave(&bnad->bna_lock, flags);
2800         bna_rx_vlan_add(bnad->rx_info[0].rx, vid);
2801         set_bit(vid, bnad->active_vlans);
2802         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2803
2804         mutex_unlock(&bnad->conf_mutex);
2805 }
2806
2807 static void
2808 bnad_vlan_rx_kill_vid(struct net_device *netdev,
2809                                   unsigned short vid)
2810 {
2811         struct bnad *bnad = netdev_priv(netdev);
2812         unsigned long flags;
2813
2814         if (!bnad->rx_info[0].rx)
2815                 return;
2816
2817         mutex_lock(&bnad->conf_mutex);
2818
2819         spin_lock_irqsave(&bnad->bna_lock, flags);
2820         clear_bit(vid, bnad->active_vlans);
2821         bna_rx_vlan_del(bnad->rx_info[0].rx, vid);
2822         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2823
2824         mutex_unlock(&bnad->conf_mutex);
2825 }
2826
2827 #ifdef CONFIG_NET_POLL_CONTROLLER
2828 static void
2829 bnad_netpoll(struct net_device *netdev)
2830 {
2831         struct bnad *bnad = netdev_priv(netdev);
2832         struct bnad_rx_info *rx_info;
2833         struct bnad_rx_ctrl *rx_ctrl;
2834         u32 curr_mask;
2835         int i, j;
2836
2837         if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
2838                 bna_intx_disable(&bnad->bna, curr_mask);
2839                 bnad_isr(bnad->pcidev->irq, netdev);
2840                 bna_intx_enable(&bnad->bna, curr_mask);
2841         } else {
2842                 for (i = 0; i < bnad->num_rx; i++) {
2843                         rx_info = &bnad->rx_info[i];
2844                         if (!rx_info->rx)
2845                                 continue;
2846                         for (j = 0; j < bnad->num_rxp_per_rx; j++) {
2847                                 rx_ctrl = &rx_info->rx_ctrl[j];
2848                                 if (rx_ctrl->ccb) {
2849                                         bnad_disable_rx_irq(bnad,
2850                                                             rx_ctrl->ccb);
2851                                         bnad_netif_rx_schedule_poll(bnad,
2852                                                             rx_ctrl->ccb);
2853                                 }
2854                         }
2855                 }
2856         }
2857 }
2858 #endif
2859
2860 static const struct net_device_ops bnad_netdev_ops = {
2861         .ndo_open               = bnad_open,
2862         .ndo_stop               = bnad_stop,
2863         .ndo_start_xmit         = bnad_start_xmit,
2864         .ndo_get_stats64        = bnad_get_stats64,
2865         .ndo_set_rx_mode        = bnad_set_rx_mode,
2866         .ndo_set_multicast_list = bnad_set_rx_mode,
2867         .ndo_validate_addr      = eth_validate_addr,
2868         .ndo_set_mac_address    = bnad_set_mac_address,
2869         .ndo_change_mtu         = bnad_change_mtu,
2870         .ndo_vlan_rx_add_vid    = bnad_vlan_rx_add_vid,
2871         .ndo_vlan_rx_kill_vid   = bnad_vlan_rx_kill_vid,
2872 #ifdef CONFIG_NET_POLL_CONTROLLER
2873         .ndo_poll_controller    = bnad_netpoll
2874 #endif
2875 };
2876
2877 static void
2878 bnad_netdev_init(struct bnad *bnad, bool using_dac)
2879 {
2880         struct net_device *netdev = bnad->netdev;
2881
2882         netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
2883                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2884                 NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_VLAN_TX;
2885
2886         netdev->vlan_features = NETIF_F_SG | NETIF_F_HIGHDMA |
2887                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2888                 NETIF_F_TSO | NETIF_F_TSO6;
2889
2890         netdev->features |= netdev->hw_features |
2891                 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
2892
2893         if (using_dac)
2894                 netdev->features |= NETIF_F_HIGHDMA;
2895
2896         netdev->mem_start = bnad->mmio_start;
2897         netdev->mem_end = bnad->mmio_start + bnad->mmio_len - 1;
2898
2899         netdev->netdev_ops = &bnad_netdev_ops;
2900         bnad_set_ethtool_ops(netdev);
2901 }
2902
2903 /*
2904  * 1. Initialize the bnad structure
2905  * 2. Setup netdev pointer in pci_dev
2906  * 3. Initialize Tx free tasklet
2907  * 4. Initialize no. of TxQ & CQs & MSIX vectors
2908  */
2909 static int
2910 bnad_init(struct bnad *bnad,
2911           struct pci_dev *pdev, struct net_device *netdev)
2912 {
2913         unsigned long flags;
2914
2915         SET_NETDEV_DEV(netdev, &pdev->dev);
2916         pci_set_drvdata(pdev, netdev);
2917
2918         bnad->netdev = netdev;
2919         bnad->pcidev = pdev;
2920         bnad->mmio_start = pci_resource_start(pdev, 0);
2921         bnad->mmio_len = pci_resource_len(pdev, 0);
2922         bnad->bar0 = ioremap_nocache(bnad->mmio_start, bnad->mmio_len);
2923         if (!bnad->bar0) {
2924                 dev_err(&pdev->dev, "ioremap for bar0 failed\n");
2925                 pci_set_drvdata(pdev, NULL);
2926                 return -ENOMEM;
2927         }
2928         pr_info("bar0 mapped to %p, len %llu\n", bnad->bar0,
2929                (unsigned long long) bnad->mmio_len);
2930
2931         spin_lock_irqsave(&bnad->bna_lock, flags);
2932         if (!bnad_msix_disable)
2933                 bnad->cfg_flags = BNAD_CF_MSIX;
2934
2935         bnad->cfg_flags |= BNAD_CF_DIM_ENABLED;
2936
2937         bnad_q_num_init(bnad);
2938         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2939
2940         bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx) +
2941                 (bnad->num_rx * bnad->num_rxp_per_rx) +
2942                          BNAD_MAILBOX_MSIX_VECTORS;
2943
2944         bnad->txq_depth = BNAD_TXQ_DEPTH;
2945         bnad->rxq_depth = BNAD_RXQ_DEPTH;
2946
2947         bnad->tx_coalescing_timeo = BFI_TX_COALESCING_TIMEO;
2948         bnad->rx_coalescing_timeo = BFI_RX_COALESCING_TIMEO;
2949
2950         tasklet_init(&bnad->tx_free_tasklet, bnad_tx_free_tasklet,
2951                      (unsigned long)bnad);
2952
2953         return 0;
2954 }
2955
2956 /*
2957  * Must be called after bnad_pci_uninit()
2958  * so that iounmap() and pci_set_drvdata(NULL)
2959  * happen only after PCI uninitialization.
2960  */
2961 static void
2962 bnad_uninit(struct bnad *bnad)
2963 {
2964         if (bnad->bar0)
2965                 iounmap(bnad->bar0);
2966         pci_set_drvdata(bnad->pcidev, NULL);
2967 }
2968
2969 /*
2970  * Initialize locks
2971  *      a) Per-device mutex used for serializing configuration
2972  *         changes from the OS interface
2973  *      b) Spin lock used to protect the bna state machine
2974  */
2975 static void
2976 bnad_lock_init(struct bnad *bnad)
2977 {
2978         spin_lock_init(&bnad->bna_lock);
2979         mutex_init(&bnad->conf_mutex);
2980 }
2981
2982 static void
2983 bnad_lock_uninit(struct bnad *bnad)
2984 {
2985         mutex_destroy(&bnad->conf_mutex);
2986 }
2987
2988 /* PCI Initialization */
2989 static int
2990 bnad_pci_init(struct bnad *bnad,
2991               struct pci_dev *pdev, bool *using_dac)
2992 {
2993         int err;
2994
2995         err = pci_enable_device(pdev);
2996         if (err)
2997                 return err;
2998         err = pci_request_regions(pdev, BNAD_NAME);
2999         if (err)
3000                 goto disable_device;
3001         if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
3002             !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
3003                 *using_dac = 1;
3004         } else {
3005                 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
3006                 if (err) {
3007                         err = dma_set_coherent_mask(&pdev->dev,
3008                                                     DMA_BIT_MASK(32));
3009                         if (err)
3010                                 goto release_regions;
3011                 }
3012                 *using_dac = 0;
3013         }
3014         pci_set_master(pdev);
3015         return 0;
3016
3017 release_regions:
3018         pci_release_regions(pdev);
3019 disable_device:
3020         pci_disable_device(pdev);
3021
3022         return err;
3023 }
3024
3025 static void
3026 bnad_pci_uninit(struct pci_dev *pdev)
3027 {
3028         pci_release_regions(pdev);
3029         pci_disable_device(pdev);
3030 }
3031
3032 static int __devinit
3033 bnad_pci_probe(struct pci_dev *pdev,
3034                 const struct pci_device_id *pcidev_id)
3035 {
3036         bool    using_dac = false;
3037         int     err;
3038         struct bnad *bnad;
3039         struct bna *bna;
3040         struct net_device *netdev;
3041         struct bfa_pcidev pcidev_info;
3042         unsigned long flags;
3043
3044         pr_info("bnad_pci_probe : (0x%p, 0x%p) PCI Func : (%d)\n",
3045                pdev, pcidev_id, PCI_FUNC(pdev->devfn));
3046
3047         mutex_lock(&bnad_fwimg_mutex);
3048         if (!cna_get_firmware_buf(pdev)) {
3049                 mutex_unlock(&bnad_fwimg_mutex);
3050                 pr_warn("Failed to load Firmware Image!\n");
3051                 return -ENODEV;
3052         }
3053         mutex_unlock(&bnad_fwimg_mutex);
3054
3055         /*
3056          * Allocates sizeof(struct net_device) + sizeof(struct bnad);
3057          * bnad = netdev_priv(netdev)
3058          */
3059         netdev = alloc_etherdev(sizeof(struct bnad));
3060         if (!netdev) {
3061                 dev_err(&pdev->dev, "alloc_etherdev failed\n");
3062                 err = -ENOMEM;
3063                 return err;
3064         }
3065         bnad = netdev_priv(netdev);
3066
3067         /*
3068          * PCI initialization
3069          *      Output : using_dac = true for 64-bit DMA,
3070          *               false for 32-bit DMA
3071          */
3072         err = bnad_pci_init(bnad, pdev, &using_dac);
3073         if (err)
3074                 goto free_netdev;
3075
3076         bnad_lock_init(bnad);
3077         /*
3078          * Initialize bnad structure
3079          * Setup relation between pci_dev & netdev
3080          * Init Tx free tasklet
3081          */
3082         err = bnad_init(bnad, pdev, netdev);
3083         if (err)
3084                 goto pci_uninit;
3085         /* Initialize netdev structure, set up ethtool ops */
3086         bnad_netdev_init(bnad, using_dac);
3087
3088         /* Set link to down state */
3089         netif_carrier_off(netdev);
3090
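        /*
         * bnad_enable_msix() is expected to handle vector allocation
         * failure internally by falling back to INTx mode, hence no
         * return value is checked here.
         */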
3091         bnad_enable_msix(bnad);
3092
3093         /* Get resource requirements from bna */
3094         bna_res_req(&bnad->res_info[0]);
3095
3096         /* Allocate resources from bna */
3097         err = bnad_res_alloc(bnad);
3098         if (err)
3099                 goto disable_msix;
3100
3101         bna = &bnad->bna;
3102
3103         /* Setup pcidev_info for bna_init() */
3104         pcidev_info.pci_slot = PCI_SLOT(bnad->pcidev->devfn);
3105         pcidev_info.pci_func = PCI_FUNC(bnad->pcidev->devfn);
3106         pcidev_info.device_id = bnad->pcidev->device;
3107         pcidev_info.pci_bar_kva = bnad->bar0;
3108
3109         mutex_lock(&bnad->conf_mutex);
3110
3111         spin_lock_irqsave(&bnad->bna_lock, flags);
3112         bna_init(bna, bnad, &pcidev_info, &bnad->res_info[0]);
3113         spin_unlock_irqrestore(&bnad->bna_lock, flags);
3114
3115         bnad->stats.bna_stats = &bna->stats;
3116
3117         /* Set up timers */
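        /*
         * The handler names suggest the roles: ioc_timer drives IOC
         * state-machine timeouts, hb_timer the firmware heartbeat check,
         * iocpf_timer the IOC PF state machine, and sem_timer the
         * hardware-semaphore retry.
         */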
3118         setup_timer(&bnad->bna.device.ioc.ioc_timer, bnad_ioc_timeout,
3119                                 ((unsigned long)bnad));
3120         setup_timer(&bnad->bna.device.ioc.hb_timer, bnad_ioc_hb_check,
3121                                 ((unsigned long)bnad));
3122         setup_timer(&bnad->bna.device.ioc.iocpf_timer, bnad_iocpf_timeout,
3123                                 ((unsigned long)bnad));
3124         setup_timer(&bnad->bna.device.ioc.sem_timer, bnad_iocpf_sem_timeout,
3125                                 ((unsigned long)bnad));
3126
3127         /* Start the IOCPF timer before bringing up the IOC */
3128         mod_timer(&bnad->bna.device.ioc.iocpf_timer,
3129                   jiffies + msecs_to_jiffies(BNA_IOC_TIMER_FREQ));
3130
3131         /*
3132          * Start the chip
3133          * The return value is intentionally ignored here; if the
3134          * enable fails, the bna state machine will recover on its own
3135          */
3136         err = bnad_device_enable(bnad);
3137
3138         /* Get the burnt-in mac */
3139         spin_lock_irqsave(&bnad->bna_lock, flags);
3140         bna_port_mac_get(&bna->port, &bnad->perm_addr);
3141         bnad_set_netdev_perm_addr(bnad);
3142         spin_unlock_irqrestore(&bnad->bna_lock, flags);
3143
3144         mutex_unlock(&bnad->conf_mutex);
3145
3146         /* Finally, register with the net_device layer */
3147         err = register_netdev(netdev);
3148         if (err) {
3149                 pr_err("BNA : Registering with netdev failed\n");
3150                 goto disable_device;
3151         }
3152
3153         return 0;
3154
3155 disable_device:
3156         mutex_lock(&bnad->conf_mutex);
3157         bnad_device_disable(bnad);
3158         del_timer_sync(&bnad->bna.device.ioc.ioc_timer);
3159         del_timer_sync(&bnad->bna.device.ioc.sem_timer);
3160         del_timer_sync(&bnad->bna.device.ioc.hb_timer);
3161         spin_lock_irqsave(&bnad->bna_lock, flags);
3162         bna_uninit(bna);
3163         spin_unlock_irqrestore(&bnad->bna_lock, flags);
3164         mutex_unlock(&bnad->conf_mutex);
3165
3166         bnad_res_free(bnad);
disable_msix:
3167         bnad_disable_msix(bnad);
3168 pci_uninit:
3169         bnad_pci_uninit(pdev);
3170         bnad_lock_uninit(bnad);
3171         bnad_uninit(bnad);
3172 free_netdev:
3173         free_netdev(netdev);
3174         return err;
3175 }
3176
3177 static void __devexit
3178 bnad_pci_remove(struct pci_dev *pdev)
3179 {
3180         struct net_device *netdev = pci_get_drvdata(pdev);
3181         struct bnad *bnad;
3182         struct bna *bna;
3183         unsigned long flags;
3184
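        /*
         * drvdata is cleared by bnad_uninit(), so a NULL here means the
         * device was never fully probed and there is nothing to undo.
         */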
3185         if (!netdev)
3186                 return;
3187
3188         pr_info("%s bnad_pci_remove\n", netdev->name);
3189         bnad = netdev_priv(netdev);
3190         bna = &bnad->bna;
3191
3192         unregister_netdev(netdev);
3193
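        /* Tear down in the reverse order of bnad_pci_probe() */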
3194         mutex_lock(&bnad->conf_mutex);
3195         bnad_device_disable(bnad);
3196         del_timer_sync(&bnad->bna.device.ioc.ioc_timer);
3197         del_timer_sync(&bnad->bna.device.ioc.sem_timer);
3198         del_timer_sync(&bnad->bna.device.ioc.hb_timer);
3199         spin_lock_irqsave(&bnad->bna_lock, flags);
3200         bna_uninit(bna);
3201         spin_unlock_irqrestore(&bnad->bna_lock, flags);
3202         mutex_unlock(&bnad->conf_mutex);
3203
3204         bnad_res_free(bnad);
3205         bnad_disable_msix(bnad);
3206         bnad_pci_uninit(pdev);
3207         bnad_lock_uninit(bnad);
3208         bnad_uninit(bnad);
3209         free_netdev(netdev);
3210 }
3211
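/*
 * Match only the Ethernet-class function of the CT ASIC; the class mask
 * presumably keeps this driver off the FCoE function that shares the
 * same PCI device ID on the converged adapter.
 */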
3212 static DEFINE_PCI_DEVICE_TABLE(bnad_pci_id_table) = {
3213         {
3214                 PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
3215                         PCI_DEVICE_ID_BROCADE_CT),
3216                 .class = PCI_CLASS_NETWORK_ETHERNET << 8,
3217                 .class_mask =  0xffff00
3218         }, {0,  }
3219 };
3220
3221 MODULE_DEVICE_TABLE(pci, bnad_pci_id_table);
3222
3223 static struct pci_driver bnad_pci_driver = {
3224         .name = BNAD_NAME,
3225         .id_table = bnad_pci_id_table,
3226         .probe = bnad_pci_probe,
3227         .remove = __devexit_p(bnad_pci_remove),
3228 };
3229
3230 static int __init
3231 bnad_module_init(void)
3232 {
3233         int err;
3234
3235         pr_info("Brocade 10G Ethernet driver - version: %s\n",
3236                         BNAD_VERSION);
3237
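        /* Hand the module parameter down to the common IOC layer */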
3238         bfa_nw_ioc_auto_recover(bnad_ioc_auto_recover);
3239
3240         err = pci_register_driver(&bnad_pci_driver);
3241         if (err < 0) {
3242                 pr_err("bna : PCI registration failed in module init "
3243                        "(%d)\n", err);
3244                 return err;
3245         }
3246
3247         return 0;
3248 }
3249
3250 static void __exit
3251 bnad_module_exit(void)
3252 {
3253         pci_unregister_driver(&bnad_pci_driver);
3254
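        /* Drop the firmware image cached by cna_get_firmware_buf() */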
3255         if (bfi_fw)
3256                 release_firmware(bfi_fw);
3257 }
3258
3259 module_init(bnad_module_init);
3260 module_exit(bnad_module_exit);
3261
3262 MODULE_AUTHOR("Brocade");
3263 MODULE_LICENSE("GPL");
3264 MODULE_DESCRIPTION("Brocade 10G PCIe Ethernet driver");
3265 MODULE_VERSION(BNAD_VERSION);
3266 MODULE_FIRMWARE(CNA_FW_FILE_CT);