drivers/net/ethernet/brocade/bna/bnad.c
1 /*
2  * Linux network driver for Brocade Converged Network Adapter.
3  *
4  * This program is free software; you can redistribute it and/or modify it
5  * under the terms of the GNU General Public License (GPL) Version 2 as
6  * published by the Free Software Foundation
7  *
8  * This program is distributed in the hope that it will be useful, but
9  * WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  */
13 /*
14  * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
15  * All rights reserved
16  * www.brocade.com
17  */
18 #include <linux/bitops.h>
19 #include <linux/netdevice.h>
20 #include <linux/skbuff.h>
21 #include <linux/etherdevice.h>
22 #include <linux/in.h>
23 #include <linux/ethtool.h>
24 #include <linux/if_vlan.h>
25 #include <linux/if_ether.h>
26 #include <linux/ip.h>
27 #include <linux/prefetch.h>
28
29 #include "bnad.h"
30 #include "bna.h"
31 #include "cna.h"
32
33 static DEFINE_MUTEX(bnad_fwimg_mutex);
34
35 /*
36  * Module params
37  */
38 static uint bnad_msix_disable;
39 module_param(bnad_msix_disable, uint, 0444);
40 MODULE_PARM_DESC(bnad_msix_disable, "Disable MSIX mode");
41
42 static uint bnad_ioc_auto_recover = 1;
43 module_param(bnad_ioc_auto_recover, uint, 0444);
44 MODULE_PARM_DESC(bnad_ioc_auto_recover, "Enable / Disable auto recovery");
45
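/*
 * Example usage (hedged: the module is normally built as "bna" in this
 * tree, per the Brocade bna Makefile):
 *
 *   modprobe bna bnad_msix_disable=1 bnad_ioc_auto_recover=0
 *
 * disables MSIX (falling back to INTx) and turns off IOC auto recovery.
 * Both parameters are 0444, i.e. read-only via sysfs once loaded.
 */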
46 /*
47  * Global variables
48  */
49 u32 bnad_rxqs_per_cq = 2;
50
51 static const u8 bnad_bcast_addr[] =  {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
52
53 /*
54  * Local MACROS
55  */
56 #define BNAD_TX_UNMAPQ_DEPTH (bnad->txq_depth * 2)
57
58 #define BNAD_RX_UNMAPQ_DEPTH (bnad->rxq_depth)
59
60 #define BNAD_GET_MBOX_IRQ(_bnad)                                \
61         (((_bnad)->cfg_flags & BNAD_CF_MSIX) ?                  \
62          ((_bnad)->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector) : \
63          ((_bnad)->pcidev->irq))
64
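/*
 * BNAD_FILL_UNMAPQ_MEM_REQ() below describes a kernel-VA memory request
 * for _num unmap queues, each large enough for a struct bnad_unmap_q
 * plus (_depth - 1) additional bnad_skb_unmap slots; the "- 1" assumes
 * struct bnad_unmap_q already embeds the first unmap_array element.
 */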
65 #define BNAD_FILL_UNMAPQ_MEM_REQ(_res_info, _num, _depth)       \
66 do {                                                            \
67         (_res_info)->res_type = BNA_RES_T_MEM;                  \
68         (_res_info)->res_u.mem_info.mem_type = BNA_MEM_T_KVA;   \
69         (_res_info)->res_u.mem_info.num = (_num);               \
70         (_res_info)->res_u.mem_info.len =                       \
71         sizeof(struct bnad_unmap_q) +                           \
72         (sizeof(struct bnad_skb_unmap) * ((_depth) - 1));       \
73 } while (0)
74
75 #define BNAD_TXRX_SYNC_MDELAY   250     /* 250 msecs */
76
77 /*
78  * Reinitialize completions in CQ, once Rx is taken down
79  */
80 static void
81 bnad_cq_cmpl_init(struct bnad *bnad, struct bna_ccb *ccb)
82 {
83         struct bna_cq_entry *cmpl, *next_cmpl;
84         unsigned int wi_range, wis = 0, ccb_prod = 0;
85         int i;
86
87         BNA_CQ_QPGE_PTR_GET(ccb_prod, ccb->sw_qpt, cmpl,
88                             wi_range);
89
90         for (i = 0; i < ccb->q_depth; i++) {
91                 wis++;
92                 if (likely(--wi_range))
93                         next_cmpl = cmpl + 1;
94                 else {
95                         BNA_QE_INDX_ADD(ccb_prod, wis, ccb->q_depth);
96                         wis = 0;
97                         BNA_CQ_QPGE_PTR_GET(ccb_prod, ccb->sw_qpt,
98                                                 next_cmpl, wi_range);
99                 }
100                 cmpl->valid = 0;
101                 cmpl = next_cmpl;
102         }
103 }
104
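/*
 * bnad_pci_unmap_skb() DMA-unmaps one transmitted skb: the linear head
 * via dma_unmap_single(), then 'frag' page fragments via
 * dma_unmap_page(), clearing each unmap-array slot and advancing
 * 'index' around the ring with BNA_QE_INDX_ADD(). Returns the updated
 * index so the caller can continue from the next slot.
 */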
105 static u32
106 bnad_pci_unmap_skb(struct device *pdev, struct bnad_skb_unmap *array,
107         u32 index, u32 depth, struct sk_buff *skb, u32 frag)
108 {
109         int j;
110         array[index].skb = NULL;
111
112         dma_unmap_single(pdev, dma_unmap_addr(&array[index], dma_addr),
113                         skb_headlen(skb), DMA_TO_DEVICE);
114         dma_unmap_addr_set(&array[index], dma_addr, 0);
115         BNA_QE_INDX_ADD(index, 1, depth);
116
117         for (j = 0; j < frag; j++) {
118                 dma_unmap_page(pdev, dma_unmap_addr(&array[index], dma_addr),
119                           skb_shinfo(skb)->frags[j].size, DMA_TO_DEVICE);
120                 dma_unmap_addr_set(&array[index], dma_addr, 0);
121                 BNA_QE_INDX_ADD(index, 1, depth);
122         }
123
124         return index;
125 }
126
127 /*
128  * Frees all pending Tx Bufs
129  * At this point no activity is expected on the Q,
130  * so DMA unmap & freeing is fine.
131  */
132 static void
133 bnad_free_all_txbufs(struct bnad *bnad,
134                  struct bna_tcb *tcb)
135 {
136         u32             unmap_cons;
137         struct bnad_unmap_q *unmap_q = tcb->unmap_q;
138         struct bnad_skb_unmap *unmap_array;
139         struct sk_buff          *skb = NULL;
140         int                     q;
141
142         unmap_array = unmap_q->unmap_array;
143
144         for (q = 0; q < unmap_q->q_depth; q++) {
145                 skb = unmap_array[q].skb;
146                 if (!skb)
147                         continue;
148
149                 unmap_cons = q;
150                 unmap_cons = bnad_pci_unmap_skb(&bnad->pcidev->dev, unmap_array,
151                                 unmap_cons, unmap_q->q_depth, skb,
152                                 skb_shinfo(skb)->nr_frags);
153
154                 dev_kfree_skb_any(skb);
155         }
156 }
157
158 /* Data Path Handlers */
159
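/*
 * A note on the ring bookkeeping used below (the BNA_QE_* macros live in
 * the bna headers; this summary is inferred from their use here):
 * BNA_QE_INDX_ADD(idx, n, depth) advances idx by n modulo depth,
 * BNA_QE_FREE_CNT() / BNA_QE_IN_USE_CNT() report free and used entries,
 * and BNA_Q_INDEX_CHANGE() gives the wrapped distance from one index to
 * another. With a power-of-two depth of 64, for example, index 62
 * advanced by 4 wraps to 2.
 */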
160 /*
161  * bnad_free_txbufs : Frees the Tx bufs on Tx completion
162  * Can be called in a) Interrupt context
163  *                  b) Sending context
164  *                  c) Tasklet context
165  */
166 static u32
167 bnad_free_txbufs(struct bnad *bnad,
168                  struct bna_tcb *tcb)
169 {
170         u32             unmap_cons, sent_packets = 0, sent_bytes = 0;
171         u16             wis, updated_hw_cons;
172         struct bnad_unmap_q *unmap_q = tcb->unmap_q;
173         struct bnad_skb_unmap *unmap_array;
174         struct sk_buff          *skb;
175
176         /*
177          * Just return if TX is stopped. This check is useful
178          * when bnad_free_txbufs() runs from a tasklet that was
179          * scheduled before bnad_cb_tx_cleanup() cleared the
180          * BNAD_TXQ_TX_STARTED bit, but this routine actually runs
181          * only after that cleanup has completed.
182          */
183         if (!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
184                 return 0;
185
186         updated_hw_cons = *(tcb->hw_consumer_index);
187
188         wis = BNA_Q_INDEX_CHANGE(tcb->consumer_index,
189                                   updated_hw_cons, tcb->q_depth);
190
191         BUG_ON(!(wis <= BNA_QE_IN_USE_CNT(tcb, tcb->q_depth)));
192
193         unmap_array = unmap_q->unmap_array;
194         unmap_cons = unmap_q->consumer_index;
195
196         prefetch(&unmap_array[unmap_cons + 1]);
197         while (wis) {
198                 skb = unmap_array[unmap_cons].skb;
199
200                 sent_packets++;
201                 sent_bytes += skb->len;
202                 wis -= BNA_TXQ_WI_NEEDED(1 + skb_shinfo(skb)->nr_frags);
203
204                 unmap_cons = bnad_pci_unmap_skb(&bnad->pcidev->dev, unmap_array,
205                                 unmap_cons, unmap_q->q_depth, skb,
206                                 skb_shinfo(skb)->nr_frags);
207
208                 dev_kfree_skb_any(skb);
209         }
210
211         /* Update consumer pointers. */
212         tcb->consumer_index = updated_hw_cons;
213         unmap_q->consumer_index = unmap_cons;
214
215         tcb->txq->tx_packets += sent_packets;
216         tcb->txq->tx_bytes += sent_bytes;
217
218         return sent_packets;
219 }
220
221 /* Tx Free Tasklet function */
222 /* Frees completed Tx buffers for every tcb in every Tx object */
223 /*
224  * Scheduled from sending context, so that
225  * the fat Tx lock is not held for too long
226  * in the sending context.
227  */
228 static void
229 bnad_tx_free_tasklet(unsigned long bnad_ptr)
230 {
231         struct bnad *bnad = (struct bnad *)bnad_ptr;
232         struct bna_tcb *tcb;
233         u32             acked = 0;
234         int                     i, j;
235
236         for (i = 0; i < bnad->num_tx; i++) {
237                 for (j = 0; j < bnad->num_txq_per_tx; j++) {
238                         tcb = bnad->tx_info[i].tcb[j];
239                         if (!tcb)
240                                 continue;
241                         if (((u16) (*tcb->hw_consumer_index) !=
242                                 tcb->consumer_index) &&
243                                 (!test_and_set_bit(BNAD_TXQ_FREE_SENT,
244                                                   &tcb->flags))) {
245                                 acked = bnad_free_txbufs(bnad, tcb);
246                                 if (likely(test_bit(BNAD_TXQ_TX_STARTED,
247                                         &tcb->flags)))
248                                         bna_ib_ack(tcb->i_dbell, acked);
249                                 smp_mb__before_clear_bit();
250                                 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
251                         }
252                         if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED,
253                                                 &tcb->flags)))
254                                 continue;
255                         if (netif_queue_stopped(bnad->netdev)) {
256                                 if (acked && netif_carrier_ok(bnad->netdev) &&
257                                         BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
258                                                 BNAD_NETIF_WAKE_THRESHOLD) {
259                                         netif_wake_queue(bnad->netdev);
260                                         /* TODO */
261                                         /* Counters for individual TxQs? */
262                                         BNAD_UPDATE_CTR(bnad,
263                                                 netif_queue_wakeup);
264                                 }
265                         }
266                 }
267         }
268 }
269
270 static u32
271 bnad_tx(struct bnad *bnad, struct bna_tcb *tcb)
272 {
273         struct net_device *netdev = bnad->netdev;
274         u32 sent = 0;
275
276         if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
277                 return 0;
278
279         sent = bnad_free_txbufs(bnad, tcb);
280         if (sent) {
281                 if (netif_queue_stopped(netdev) &&
282                     netif_carrier_ok(netdev) &&
283                     BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
284                                     BNAD_NETIF_WAKE_THRESHOLD) {
285                         if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) {
286                                 netif_wake_queue(netdev);
287                                 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
288                         }
289                 }
290         }
291
292         if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
293                 bna_ib_ack(tcb->i_dbell, sent);
294
295         smp_mb__before_clear_bit();
296         clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
297
298         return sent;
299 }
300
301 /* MSIX Tx Completion Handler */
302 static irqreturn_t
303 bnad_msix_tx(int irq, void *data)
304 {
305         struct bna_tcb *tcb = (struct bna_tcb *)data;
306         struct bnad *bnad = tcb->bnad;
307
308         bnad_tx(bnad, tcb);
309
310         return IRQ_HANDLED;
311 }
312
313 static void
314 bnad_reset_rcb(struct bnad *bnad, struct bna_rcb *rcb)
315 {
316         struct bnad_unmap_q *unmap_q = rcb->unmap_q;
317
318         rcb->producer_index = 0;
319         rcb->consumer_index = 0;
320
321         unmap_q->producer_index = 0;
322         unmap_q->consumer_index = 0;
323 }
324
325 static void
326 bnad_free_all_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
327 {
328         struct bnad_unmap_q *unmap_q;
329         struct bnad_skb_unmap *unmap_array;
330         struct sk_buff *skb;
331         int unmap_cons;
332
333         unmap_q = rcb->unmap_q;
334         unmap_array = unmap_q->unmap_array;
335         for (unmap_cons = 0; unmap_cons < unmap_q->q_depth; unmap_cons++) {
336                 skb = unmap_array[unmap_cons].skb;
337                 if (!skb)
338                         continue;
339                 unmap_array[unmap_cons].skb = NULL;
340                 dma_unmap_single(&bnad->pcidev->dev,
341                                  dma_unmap_addr(&unmap_array[unmap_cons],
342                                                 dma_addr),
343                                  rcb->rxq->buffer_size,
344                                  DMA_FROM_DEVICE);
345                 dev_kfree_skb(skb);
346         }
347         bnad_reset_rcb(bnad, rcb);
348 }
349
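/*
 * bnad_alloc_n_post_rxbufs() refills as many free RxQ slots as it can:
 * for each slot it allocates an IP-aligned skb of rxq->buffer_size
 * bytes, DMA-maps it, records the mapping in the unmap array and writes
 * the bus address into the RxQ entry. If at least one buffer was
 * posted, the producer indices are updated, an smp_mb() is issued, and
 * the doorbell is rung as long as BNAD_RXQ_STARTED is still set.
 */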
350 static void
351 bnad_alloc_n_post_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
352 {
353         u16 to_alloc, alloced, unmap_prod, wi_range;
354         struct bnad_unmap_q *unmap_q = rcb->unmap_q;
355         struct bnad_skb_unmap *unmap_array;
356         struct bna_rxq_entry *rxent;
357         struct sk_buff *skb;
358         dma_addr_t dma_addr;
359
360         alloced = 0;
361         to_alloc =
362                 BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth);
363
364         unmap_array = unmap_q->unmap_array;
365         unmap_prod = unmap_q->producer_index;
366
367         BNA_RXQ_QPGE_PTR_GET(unmap_prod, rcb->sw_qpt, rxent, wi_range);
368
369         while (to_alloc--) {
370                 if (!wi_range)
371                         BNA_RXQ_QPGE_PTR_GET(unmap_prod, rcb->sw_qpt, rxent,
372                                              wi_range);
373                 skb = netdev_alloc_skb_ip_align(bnad->netdev,
374                                                 rcb->rxq->buffer_size);
375                 if (unlikely(!skb)) {
376                         BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed);
377                         rcb->rxq->rxbuf_alloc_failed++;
378                         goto finishing;
379                 }
380                 unmap_array[unmap_prod].skb = skb;
381                 dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
382                                           rcb->rxq->buffer_size,
383                                           DMA_FROM_DEVICE);
384                 dma_unmap_addr_set(&unmap_array[unmap_prod], dma_addr,
385                                    dma_addr);
386                 BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
387                 BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
388
389                 rxent++;
390                 wi_range--;
391                 alloced++;
392         }
393
394 finishing:
395         if (likely(alloced)) {
396                 unmap_q->producer_index = unmap_prod;
397                 rcb->producer_index = unmap_prod;
398                 smp_mb();
399                 if (likely(test_bit(BNAD_RXQ_STARTED, &rcb->flags)))
400                         bna_rxq_prod_indx_doorbell(rcb);
401         }
402 }
403
404 static inline void
405 bnad_refill_rxq(struct bnad *bnad, struct bna_rcb *rcb)
406 {
407         struct bnad_unmap_q *unmap_q = rcb->unmap_q;
408
409         if (!test_and_set_bit(BNAD_RXQ_REFILL, &rcb->flags)) {
410                 if (BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth)
411                          >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT)
412                         bnad_alloc_n_post_rxbufs(bnad, rcb);
413                 smp_mb__before_clear_bit();
414                 clear_bit(BNAD_RXQ_REFILL, &rcb->flags);
415         }
416 }
417
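/*
 * bnad_poll_cq() walks up to 'budget' valid completions on a CQ. Each
 * completed buffer is unmapped and handed up the stack; frames flagged
 * with MAC/FCS/length errors are dropped. CHECKSUM_UNNECESSARY is set
 * only when RXCSUM is enabled and the hardware reports a good L4
 * checksum on a TCP/UDP frame whose IPv4 header checksum also passed
 * (or which is IPv6); VLAN tags are attached when present, and
 * checksum-verified packets go through GRO. On exit the processed
 * completions are acked via bna_ib_ack_disable_irq() and both RCBs of
 * the CQ are refilled.
 */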
418 static u32
419 bnad_poll_cq(struct bnad *bnad, struct bna_ccb *ccb, int budget)
420 {
421         struct bna_cq_entry *cmpl, *next_cmpl;
422         struct bna_rcb *rcb = NULL;
423         unsigned int wi_range, packets = 0, wis = 0;
424         struct bnad_unmap_q *unmap_q;
425         struct bnad_skb_unmap *unmap_array;
426         struct sk_buff *skb;
427         u32 flags, unmap_cons;
428         struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate;
429         struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl);
430
431         set_bit(BNAD_FP_IN_RX_PATH, &rx_ctrl->flags);
432
433         if (!test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)) {
434                 clear_bit(BNAD_FP_IN_RX_PATH, &rx_ctrl->flags);
435                 return 0;
436         }
437
438         prefetch(bnad->netdev);
439         BNA_CQ_QPGE_PTR_GET(ccb->producer_index, ccb->sw_qpt, cmpl,
440                             wi_range);
441         BUG_ON(!(wi_range <= ccb->q_depth));
442         while (cmpl->valid && packets < budget) {
443                 packets++;
444                 BNA_UPDATE_PKT_CNT(pkt_rt, ntohs(cmpl->length));
445
446                 if (bna_is_small_rxq(cmpl->rxq_id))
447                         rcb = ccb->rcb[1];
448                 else
449                         rcb = ccb->rcb[0];
450
451                 unmap_q = rcb->unmap_q;
452                 unmap_array = unmap_q->unmap_array;
453                 unmap_cons = unmap_q->consumer_index;
454
455                 skb = unmap_array[unmap_cons].skb;
456                 BUG_ON(!(skb));
457                 unmap_array[unmap_cons].skb = NULL;
458                 dma_unmap_single(&bnad->pcidev->dev,
459                                  dma_unmap_addr(&unmap_array[unmap_cons],
460                                                 dma_addr),
461                                  rcb->rxq->buffer_size,
462                                  DMA_FROM_DEVICE);
463                 BNA_QE_INDX_ADD(unmap_q->consumer_index, 1, unmap_q->q_depth);
464
465                 /* Should be more efficient ? Performance ? */
466                 BNA_QE_INDX_ADD(rcb->consumer_index, 1, rcb->q_depth);
467
468                 wis++;
469                 if (likely(--wi_range))
470                         next_cmpl = cmpl + 1;
471                 else {
472                         BNA_QE_INDX_ADD(ccb->producer_index, wis, ccb->q_depth);
473                         wis = 0;
474                         BNA_CQ_QPGE_PTR_GET(ccb->producer_index, ccb->sw_qpt,
475                                                 next_cmpl, wi_range);
476                         BUG_ON(!(wi_range <= ccb->q_depth));
477                 }
478                 prefetch(next_cmpl);
479
480                 flags = ntohl(cmpl->flags);
481                 if (unlikely
482                     (flags &
483                      (BNA_CQ_EF_MAC_ERROR | BNA_CQ_EF_FCS_ERROR |
484                       BNA_CQ_EF_TOO_LONG))) {
485                         dev_kfree_skb_any(skb);
486                         rcb->rxq->rx_packets_with_error++;
487                         goto next;
488                 }
489
490                 skb_put(skb, ntohs(cmpl->length));
491                 if (likely
492                     ((bnad->netdev->features & NETIF_F_RXCSUM) &&
493                      (((flags & BNA_CQ_EF_IPV4) &&
494                       (flags & BNA_CQ_EF_L3_CKSUM_OK)) ||
495                       (flags & BNA_CQ_EF_IPV6)) &&
496                       (flags & (BNA_CQ_EF_TCP | BNA_CQ_EF_UDP)) &&
497                       (flags & BNA_CQ_EF_L4_CKSUM_OK)))
498                         skb->ip_summed = CHECKSUM_UNNECESSARY;
499                 else
500                         skb_checksum_none_assert(skb);
501
502                 rcb->rxq->rx_packets++;
503                 rcb->rxq->rx_bytes += skb->len;
504                 skb->protocol = eth_type_trans(skb, bnad->netdev);
505
506                 if (flags & BNA_CQ_EF_VLAN)
507                         __vlan_hwaccel_put_tag(skb, ntohs(cmpl->vlan_tag));
508
509                 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
510                         napi_gro_receive(&rx_ctrl->napi, skb);
511                 else {
512                         netif_receive_skb(skb);
513                 }
514
515 next:
516                 cmpl->valid = 0;
517                 cmpl = next_cmpl;
518         }
519
520         BNA_QE_INDX_ADD(ccb->producer_index, wis, ccb->q_depth);
521
522         if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
523                 bna_ib_ack_disable_irq(ccb->i_dbell, packets);
524
525         bnad_refill_rxq(bnad, ccb->rcb[0]);
526         if (ccb->rcb[1])
527                 bnad_refill_rxq(bnad, ccb->rcb[1]);
528
529         clear_bit(BNAD_FP_IN_RX_PATH, &rx_ctrl->flags);
530
531         return packets;
532 }
533
534 static void
535 bnad_netif_rx_schedule_poll(struct bnad *bnad, struct bna_ccb *ccb)
536 {
537         struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl);
538         struct napi_struct *napi = &rx_ctrl->napi;
539
540         if (likely(napi_schedule_prep(napi))) {
541                 __napi_schedule(napi);
542                 rx_ctrl->rx_schedule++;
543         }
544 }
545
546 /* MSIX Rx Path Handler */
547 static irqreturn_t
548 bnad_msix_rx(int irq, void *data)
549 {
550         struct bna_ccb *ccb = (struct bna_ccb *)data;
551
552         if (ccb) {
553                 ((struct bnad_rx_ctrl *)(ccb->ctrl))->rx_intr_ctr++;
554                 bnad_netif_rx_schedule_poll(ccb->bnad, ccb);
555         }
556
557         return IRQ_HANDLED;
558 }
559
560 /* Interrupt handlers */
561
562 /* Mbox Interrupt Handlers */
563 static irqreturn_t
564 bnad_msix_mbox_handler(int irq, void *data)
565 {
566         u32 intr_status;
567         unsigned long flags;
568         struct bnad *bnad = (struct bnad *)data;
569
570         spin_lock_irqsave(&bnad->bna_lock, flags);
571         if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) {
572                 spin_unlock_irqrestore(&bnad->bna_lock, flags);
573                 return IRQ_HANDLED;
574         }
575
576         bna_intr_status_get(&bnad->bna, intr_status);
577
578         if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status))
579                 bna_mbox_handler(&bnad->bna, intr_status);
580
581         spin_unlock_irqrestore(&bnad->bna_lock, flags);
582
583         return IRQ_HANDLED;
584 }
585
586 static irqreturn_t
587 bnad_isr(int irq, void *data)
588 {
589         int i, j;
590         u32 intr_status;
591         unsigned long flags;
592         struct bnad *bnad = (struct bnad *)data;
593         struct bnad_rx_info *rx_info;
594         struct bnad_rx_ctrl *rx_ctrl;
595         struct bna_tcb *tcb = NULL;
596
597         spin_lock_irqsave(&bnad->bna_lock, flags);
598         if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) {
599                 spin_unlock_irqrestore(&bnad->bna_lock, flags);
600                 return IRQ_NONE;
601         }
602
603         bna_intr_status_get(&bnad->bna, intr_status);
604
605         if (unlikely(!intr_status)) {
606                 spin_unlock_irqrestore(&bnad->bna_lock, flags);
607                 return IRQ_NONE;
608         }
609
610         if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status))
611                 bna_mbox_handler(&bnad->bna, intr_status);
612
613         spin_unlock_irqrestore(&bnad->bna_lock, flags);
614
615         if (!BNA_IS_INTX_DATA_INTR(intr_status))
616                 return IRQ_HANDLED;
617
618         /* Process data interrupts */
619         /* Tx processing */
620         for (i = 0; i < bnad->num_tx; i++) {
621                 for (j = 0; j < bnad->num_txq_per_tx; j++) {
622                         tcb = bnad->tx_info[i].tcb[j];
623                         if (tcb && test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
624                                 bnad_tx(bnad, bnad->tx_info[i].tcb[j]);
625                 }
626         }
627         /* Rx processing */
628         for (i = 0; i < bnad->num_rx; i++) {
629                 rx_info = &bnad->rx_info[i];
630                 if (!rx_info->rx)
631                         continue;
632                 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
633                         rx_ctrl = &rx_info->rx_ctrl[j];
634                         if (rx_ctrl->ccb)
635                                 bnad_netif_rx_schedule_poll(bnad,
636                                                             rx_ctrl->ccb);
637                 }
638         }
639         return IRQ_HANDLED;
640 }
641
642 /*
643  * Called in interrupt / callback context
644  * with bna_lock held, so cfg_flags access is OK
645  */
646 static void
647 bnad_enable_mbox_irq(struct bnad *bnad)
648 {
649         clear_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
650
651         BNAD_UPDATE_CTR(bnad, mbox_intr_enabled);
652 }
653
654 /*
655  * Called with bnad->bna_lock held because of
656  * bnad->cfg_flags access.
657  */
658 static void
659 bnad_disable_mbox_irq(struct bnad *bnad)
660 {
661         set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
662
663         BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
664 }
665
666 static void
667 bnad_set_netdev_perm_addr(struct bnad *bnad)
668 {
669         struct net_device *netdev = bnad->netdev;
670
671         memcpy(netdev->perm_addr, &bnad->perm_addr, netdev->addr_len);
672         if (is_zero_ether_addr(netdev->dev_addr))
673                 memcpy(netdev->dev_addr, &bnad->perm_addr, netdev->addr_len);
674 }
675
676 /* Control Path Handlers */
677
678 /* Callbacks */
679 void
680 bnad_cb_mbox_intr_enable(struct bnad *bnad)
681 {
682         bnad_enable_mbox_irq(bnad);
683 }
684
685 void
686 bnad_cb_mbox_intr_disable(struct bnad *bnad)
687 {
688         bnad_disable_mbox_irq(bnad);
689 }
690
691 void
692 bnad_cb_ioceth_ready(struct bnad *bnad)
693 {
694         bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS;
695         complete(&bnad->bnad_completions.ioc_comp);
696 }
697
698 void
699 bnad_cb_ioceth_failed(struct bnad *bnad)
700 {
701         bnad->bnad_completions.ioc_comp_status = BNA_CB_FAIL;
702         complete(&bnad->bnad_completions.ioc_comp);
703 }
704
705 void
706 bnad_cb_ioceth_disabled(struct bnad *bnad)
707 {
708         bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS;
709         complete(&bnad->bnad_completions.ioc_comp);
710 }
711
712 static void
713 bnad_cb_enet_disabled(void *arg)
714 {
715         struct bnad *bnad = (struct bnad *)arg;
716
717         netif_carrier_off(bnad->netdev);
718         complete(&bnad->bnad_completions.enet_comp);
719 }
720
721 void
722 bnad_cb_ethport_link_status(struct bnad *bnad,
723                         enum bna_link_status link_status)
724 {
725         bool link_up = false;
726
727         link_up = (link_status == BNA_LINK_UP) || (link_status == BNA_CEE_UP);
728
729         if (link_status == BNA_CEE_UP) {
730                 if (!test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags))
731                         BNAD_UPDATE_CTR(bnad, cee_toggle);
732                 set_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
733         } else {
734                 if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags))
735                         BNAD_UPDATE_CTR(bnad, cee_toggle);
736                 clear_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
737         }
738
739         if (link_up) {
740                 if (!netif_carrier_ok(bnad->netdev)) {
741                         uint tx_id, tcb_id;
742                         printk(KERN_WARNING "bna: %s link up\n",
743                                 bnad->netdev->name);
744                         netif_carrier_on(bnad->netdev);
745                         BNAD_UPDATE_CTR(bnad, link_toggle);
746                         for (tx_id = 0; tx_id < bnad->num_tx; tx_id++) {
747                                 for (tcb_id = 0; tcb_id < bnad->num_txq_per_tx;
748                                       tcb_id++) {
749                                         struct bna_tcb *tcb =
750                                         bnad->tx_info[tx_id].tcb[tcb_id];
751                                         u32 txq_id;
752                                         if (!tcb)
753                                                 continue;
754
755                                         txq_id = tcb->id;
756
757                                         if (test_bit(BNAD_TXQ_TX_STARTED,
758                                                      &tcb->flags)) {
759                                                 /*
760                                                  * Force an immediate
761                                                  * Transmit Schedule */
762                                                 printk(KERN_INFO "bna: %s %d "
763                                                       "TXQ_STARTED\n",
764                                                        bnad->netdev->name,
765                                                        txq_id);
766                                                 netif_wake_subqueue(
767                                                                 bnad->netdev,
768                                                                 txq_id);
769                                                 BNAD_UPDATE_CTR(bnad,
770                                                         netif_queue_wakeup);
771                                         } else {
772                                                 netif_stop_subqueue(
773                                                                 bnad->netdev,
774                                                                 txq_id);
775                                                 BNAD_UPDATE_CTR(bnad,
776                                                         netif_queue_stop);
777                                         }
778                                 }
779                         }
780                 }
781         } else {
782                 if (netif_carrier_ok(bnad->netdev)) {
783                         printk(KERN_WARNING "bna: %s link down\n",
784                                 bnad->netdev->name);
785                         netif_carrier_off(bnad->netdev);
786                         BNAD_UPDATE_CTR(bnad, link_toggle);
787                 }
788         }
789 }
790
791 static void
792 bnad_cb_tx_disabled(void *arg, struct bna_tx *tx)
793 {
794         struct bnad *bnad = (struct bnad *)arg;
795
796         complete(&bnad->bnad_completions.tx_comp);
797 }
798
799 static void
800 bnad_cb_tcb_setup(struct bnad *bnad, struct bna_tcb *tcb)
801 {
802         struct bnad_tx_info *tx_info =
803                         (struct bnad_tx_info *)tcb->txq->tx->priv;
804         struct bnad_unmap_q *unmap_q = tcb->unmap_q;
805
806         tx_info->tcb[tcb->id] = tcb;
807         unmap_q->producer_index = 0;
808         unmap_q->consumer_index = 0;
809         unmap_q->q_depth = BNAD_TX_UNMAPQ_DEPTH;
810 }
811
812 static void
813 bnad_cb_tcb_destroy(struct bnad *bnad, struct bna_tcb *tcb)
814 {
815         struct bnad_tx_info *tx_info =
816                         (struct bnad_tx_info *)tcb->txq->tx->priv;
817         struct bnad_unmap_q *unmap_q = tcb->unmap_q;
818
819         while (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
820                 cpu_relax();
821
822         bnad_free_all_txbufs(bnad, tcb);
823
824         unmap_q->producer_index = 0;
825         unmap_q->consumer_index = 0;
826
827         smp_mb__before_clear_bit();
828         clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
829
830         tx_info->tcb[tcb->id] = NULL;
831 }
832
833 static void
834 bnad_cb_rcb_setup(struct bnad *bnad, struct bna_rcb *rcb)
835 {
836         struct bnad_unmap_q *unmap_q = rcb->unmap_q;
837
838         unmap_q->producer_index = 0;
839         unmap_q->consumer_index = 0;
840         unmap_q->q_depth = BNAD_RX_UNMAPQ_DEPTH;
841 }
842
843 static void
844 bnad_cb_rcb_destroy(struct bnad *bnad, struct bna_rcb *rcb)
845 {
846         bnad_free_all_rxbufs(bnad, rcb);
847 }
848
849 static void
850 bnad_cb_ccb_setup(struct bnad *bnad, struct bna_ccb *ccb)
851 {
852         struct bnad_rx_info *rx_info =
853                         (struct bnad_rx_info *)ccb->cq->rx->priv;
854
855         rx_info->rx_ctrl[ccb->id].ccb = ccb;
856         ccb->ctrl = &rx_info->rx_ctrl[ccb->id];
857 }
858
859 static void
860 bnad_cb_ccb_destroy(struct bnad *bnad, struct bna_ccb *ccb)
861 {
862         struct bnad_rx_info *rx_info =
863                         (struct bnad_rx_info *)ccb->cq->rx->priv;
864
865         rx_info->rx_ctrl[ccb->id].ccb = NULL;
866 }
867
868 static void
869 bnad_cb_tx_stall(struct bnad *bnad, struct bna_tx *tx)
870 {
871         struct bnad_tx_info *tx_info =
872                         (struct bnad_tx_info *)tx->priv;
873         struct bna_tcb *tcb;
874         u32 txq_id;
875         int i;
876
877         for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
878                 tcb = tx_info->tcb[i];
879                 if (!tcb)
880                         continue;
881                 txq_id = tcb->id;
882                 clear_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
883                 netif_stop_subqueue(bnad->netdev, txq_id);
884                 printk(KERN_INFO "bna: %s %d TXQ_STOPPED\n",
885                         bnad->netdev->name, txq_id);
886         }
887 }
888
889 static void
890 bnad_cb_tx_resume(struct bnad *bnad, struct bna_tx *tx)
891 {
892         struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv;
893         struct bna_tcb *tcb;
894         struct bnad_unmap_q *unmap_q;
895         u32 txq_id;
896         int i;
897
898         for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
899                 tcb = tx_info->tcb[i];
900                 if (!tcb)
901                         continue;
902                 txq_id = tcb->id;
903
904                 unmap_q = tcb->unmap_q;
905
906                 if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
907                         continue;
908
909                 while (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
910                         cpu_relax();
911
912                 bnad_free_all_txbufs(bnad, tcb);
913
914                 unmap_q->producer_index = 0;
915                 unmap_q->consumer_index = 0;
916
917                 smp_mb__before_clear_bit();
918                 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
919
920                 set_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
921
922                 if (netif_carrier_ok(bnad->netdev)) {
923                         printk(KERN_INFO "bna: %s %d TXQ_STARTED\n",
924                                 bnad->netdev->name, txq_id);
925                         netif_wake_subqueue(bnad->netdev, txq_id);
926                         BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
927                 }
928         }
929
930         /*
931          * Workaround: if the first ioceth enable failed, we may
932          * have been left with a zero MAC address. Try to fetch
933          * the MAC address again here.
934          */
935         if (is_zero_ether_addr(&bnad->perm_addr.mac[0])) {
936                 bna_enet_perm_mac_get(&bnad->bna.enet, &bnad->perm_addr);
937                 bnad_set_netdev_perm_addr(bnad);
938         }
939 }
940
941 static void
942 bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tx *tx)
943 {
944         struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv;
945         struct bna_tcb *tcb;
946         int i;
947
948         for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
949                 tcb = tx_info->tcb[i];
950                 if (!tcb)
951                         continue;
952         }
953
954         mdelay(BNAD_TXRX_SYNC_MDELAY);
955         bna_tx_cleanup_complete(tx);
956 }
957
958 static void
959 bnad_cb_rx_cleanup(struct bnad *bnad, struct bna_rx *rx)
960 {
961         struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
962         struct bna_ccb *ccb;
963         struct bnad_rx_ctrl *rx_ctrl;
964         int i;
965
966         mdelay(BNAD_TXRX_SYNC_MDELAY);
967
968         for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
969                 rx_ctrl = &rx_info->rx_ctrl[i];
970                 ccb = rx_ctrl->ccb;
971                 if (!ccb)
972                         continue;
973
974                 clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags);
975
976                 if (ccb->rcb[1])
977                         clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[1]->flags);
978
979                 while (test_bit(BNAD_FP_IN_RX_PATH, &rx_ctrl->flags))
980                         cpu_relax();
981         }
982
983         bna_rx_cleanup_complete(rx);
984 }
985
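/*
 * Called by bna when an Rx object is (re)started: for every active CQ
 * re-initialize the completion entries, free any stale buffers on each
 * RCB, mark the RxQ started, and refill it under the same
 * BNAD_RXQ_REFILL protocol that bnad_refill_rxq() uses.
 */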
986 static void
987 bnad_cb_rx_post(struct bnad *bnad, struct bna_rx *rx)
988 {
989         struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
990         struct bna_ccb *ccb;
991         struct bna_rcb *rcb;
992         struct bnad_rx_ctrl *rx_ctrl;
993         struct bnad_unmap_q *unmap_q;
994         int i;
995         int j;
996
997         for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
998                 rx_ctrl = &rx_info->rx_ctrl[i];
999                 ccb = rx_ctrl->ccb;
1000                 if (!ccb)
1001                         continue;
1002
1003                 bnad_cq_cmpl_init(bnad, ccb);
1004
1005                 for (j = 0; j < BNAD_MAX_RXQ_PER_RXP; j++) {
1006                         rcb = ccb->rcb[j];
1007                         if (!rcb)
1008                                 continue;
1009                         bnad_free_all_rxbufs(bnad, rcb);
1010
1011                         set_bit(BNAD_RXQ_STARTED, &rcb->flags);
1012                         unmap_q = rcb->unmap_q;
1013
1014                         /* Now allocate & post buffers for this RCB */
1015                         /* !!Allocation in callback context */
1016                         if (!test_and_set_bit(BNAD_RXQ_REFILL, &rcb->flags)) {
1017                                 if (BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth)
1018                                         >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT)
1019                                         bnad_alloc_n_post_rxbufs(bnad, rcb);
1020                                 smp_mb__before_clear_bit();
1021                                 clear_bit(BNAD_RXQ_REFILL, &rcb->flags);
1022                         }
1023                 }
1024         }
1025 }
1026
1027 static void
1028 bnad_cb_rx_disabled(void *arg, struct bna_rx *rx)
1029 {
1030         struct bnad *bnad = (struct bnad *)arg;
1031
1032         complete(&bnad->bnad_completions.rx_comp);
1033 }
1034
1035 static void
1036 bnad_cb_rx_mcast_add(struct bnad *bnad, struct bna_rx *rx)
1037 {
1038         bnad->bnad_completions.mcast_comp_status = BNA_CB_SUCCESS;
1039         complete(&bnad->bnad_completions.mcast_comp);
1040 }
1041
1042 void
1043 bnad_cb_stats_get(struct bnad *bnad, enum bna_cb_status status,
1044                        struct bna_stats *stats)
1045 {
1046         if (status == BNA_CB_SUCCESS)
1047                 BNAD_UPDATE_CTR(bnad, hw_stats_updates);
1048
1049         if (!netif_running(bnad->netdev) ||
1050                 !test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1051                 return;
1052
1053         mod_timer(&bnad->stats_timer,
1054                   jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
1055 }
1056
1057 static void
1058 bnad_cb_enet_mtu_set(struct bnad *bnad)
1059 {
1060         bnad->bnad_completions.mtu_comp_status = BNA_CB_SUCCESS;
1061         complete(&bnad->bnad_completions.mtu_comp);
1062 }
1063
1064 /* Resource allocation, free functions */
1065
1066 static void
1067 bnad_mem_free(struct bnad *bnad,
1068               struct bna_mem_info *mem_info)
1069 {
1070         int i;
1071         dma_addr_t dma_pa;
1072
1073         if (mem_info->mdl == NULL)
1074                 return;
1075
1076         for (i = 0; i < mem_info->num; i++) {
1077                 if (mem_info->mdl[i].kva != NULL) {
1078                         if (mem_info->mem_type == BNA_MEM_T_DMA) {
1079                                 BNA_GET_DMA_ADDR(&(mem_info->mdl[i].dma),
1080                                                 dma_pa);
1081                                 dma_free_coherent(&bnad->pcidev->dev,
1082                                                   mem_info->mdl[i].len,
1083                                                   mem_info->mdl[i].kva, dma_pa);
1084                         } else
1085                                 kfree(mem_info->mdl[i].kva);
1086                 }
1087         }
1088         kfree(mem_info->mdl);
1089         mem_info->mdl = NULL;
1090 }
1091
1092 static int
1093 bnad_mem_alloc(struct bnad *bnad,
1094                struct bna_mem_info *mem_info)
1095 {
1096         int i;
1097         dma_addr_t dma_pa;
1098
1099         if ((mem_info->num == 0) || (mem_info->len == 0)) {
1100                 mem_info->mdl = NULL;
1101                 return 0;
1102         }
1103
1104         mem_info->mdl = kcalloc(mem_info->num, sizeof(struct bna_mem_descr),
1105                                 GFP_KERNEL);
1106         if (mem_info->mdl == NULL)
1107                 return -ENOMEM;
1108
1109         if (mem_info->mem_type == BNA_MEM_T_DMA) {
1110                 for (i = 0; i < mem_info->num; i++) {
1111                         mem_info->mdl[i].len = mem_info->len;
1112                         mem_info->mdl[i].kva =
1113                                 dma_alloc_coherent(&bnad->pcidev->dev,
1114                                                 mem_info->len, &dma_pa,
1115                                                 GFP_KERNEL);
1116
1117                         if (mem_info->mdl[i].kva == NULL)
1118                                 goto err_return;
1119
1120                         BNA_SET_DMA_ADDR(dma_pa,
1121                                          &(mem_info->mdl[i].dma));
1122                 }
1123         } else {
1124                 for (i = 0; i < mem_info->num; i++) {
1125                         mem_info->mdl[i].len = mem_info->len;
1126                         mem_info->mdl[i].kva = kzalloc(mem_info->len,
1127                                                         GFP_KERNEL);
1128                         if (mem_info->mdl[i].kva == NULL)
1129                                 goto err_return;
1130                 }
1131         }
1132
1133         return 0;
1134
1135 err_return:
1136         bnad_mem_free(bnad, mem_info);
1137         return -ENOMEM;
1138 }
1139
1140 /* Free IRQ for Mailbox */
1141 static void
1142 bnad_mbox_irq_free(struct bnad *bnad)
1143 {
1144         int irq;
1145         unsigned long flags;
1146
1147         spin_lock_irqsave(&bnad->bna_lock, flags);
1148         bnad_disable_mbox_irq(bnad);
1149         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1150
1151         irq = BNAD_GET_MBOX_IRQ(bnad);
1152         free_irq(irq, bnad);
1153 }
1154
1155 /*
1156  * Allocates the IRQ for the mailbox, but keeps it disabled.
1157  * It will be enabled once we get the mbox-enable callback
1158  * from bna.
1159  */
1160 static int
1161 bnad_mbox_irq_alloc(struct bnad *bnad)
1162 {
1163         int             err = 0;
1164         unsigned long   irq_flags, flags;
1165         u32     irq;
1166         irq_handler_t   irq_handler;
1167
1168         spin_lock_irqsave(&bnad->bna_lock, flags);
1169         if (bnad->cfg_flags & BNAD_CF_MSIX) {
1170                 irq_handler = (irq_handler_t)bnad_msix_mbox_handler;
1171                 irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector;
1172                 irq_flags = 0;
1173         } else {
1174                 irq_handler = (irq_handler_t)bnad_isr;
1175                 irq = bnad->pcidev->irq;
1176                 irq_flags = IRQF_SHARED;
1177         }
1178
1179         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1180         sprintf(bnad->mbox_irq_name, "%s", BNAD_NAME);
1181
1182         /*
1183          * Set the Mbox IRQ disable flag, so that the IRQ handler
1184          * called from request_irq() for SHARED IRQs does not execute
1185          */
1186         set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
1187
1188         BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
1189
1190         err = request_irq(irq, irq_handler, irq_flags,
1191                           bnad->mbox_irq_name, bnad);
1192
1193         return err;
1194 }
1195
1196 static void
1197 bnad_txrx_irq_free(struct bnad *bnad, struct bna_intr_info *intr_info)
1198 {
1199         kfree(intr_info->idl);
1200         intr_info->idl = NULL;
1201 }
1202
1203 /* Allocates Interrupt Descriptor List for MSIX/INT-X vectors */
1204 static int
1205 bnad_txrx_irq_alloc(struct bnad *bnad, enum bnad_intr_source src,
1206                     u32 txrx_id, struct bna_intr_info *intr_info)
1207 {
1208         int i, vector_start = 0;
1209         u32 cfg_flags;
1210         unsigned long flags;
1211
1212         spin_lock_irqsave(&bnad->bna_lock, flags);
1213         cfg_flags = bnad->cfg_flags;
1214         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1215
1216         if (cfg_flags & BNAD_CF_MSIX) {
1217                 intr_info->intr_type = BNA_INTR_T_MSIX;
1218                 intr_info->idl = kcalloc(intr_info->num,
1219                                         sizeof(struct bna_intr_descr),
1220                                         GFP_KERNEL);
1221                 if (!intr_info->idl)
1222                         return -ENOMEM;
1223
1224                 switch (src) {
1225                 case BNAD_INTR_TX:
1226                         vector_start = BNAD_MAILBOX_MSIX_VECTORS + txrx_id;
1227                         break;
1228
1229                 case BNAD_INTR_RX:
1230                         vector_start = BNAD_MAILBOX_MSIX_VECTORS +
1231                                         (bnad->num_tx * bnad->num_txq_per_tx) +
1232                                         txrx_id;
1233                         break;
1234
1235                 default:
1236                         BUG();
1237                 }
1238
1239                 for (i = 0; i < intr_info->num; i++)
1240                         intr_info->idl[i].vector = vector_start + i;
1241         } else {
1242                 intr_info->intr_type = BNA_INTR_T_INTX;
1243                 intr_info->num = 1;
1244                 intr_info->idl = kcalloc(intr_info->num,
1245                                         sizeof(struct bna_intr_descr),
1246                                         GFP_KERNEL);
1247                 if (!intr_info->idl)
1248                         return -ENOMEM;
1249
1250                 switch (src) {
1251                 case BNAD_INTR_TX:
1252                         intr_info->idl[0].vector = BNAD_INTX_TX_IB_BITMASK;
1253                         break;
1254
1255                 case BNAD_INTR_RX:
1256                         intr_info->idl[0].vector = BNAD_INTX_RX_IB_BITMASK;
1257                         break;
1258                 }
1259         }
1260         return 0;
1261 }
1262
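/*
 * The MSI-X vector layout implied above: the first
 * BNAD_MAILBOX_MSIX_VECTORS entries (one, judging by the
 * BNAD_MAILBOX_MSIX_INDEX usage elsewhere in this file) carry the
 * mailbox, followed by one vector per TxQ and then one per RxP. For
 * example, with one Tx object of one TxQ and one Rx object of two RxPs:
 *
 *   vector 0      mailbox
 *   vector 1      TxQ 0
 *   vectors 2-3   CQ 0, CQ 1
 */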
1263 /**
1264  * NOTE: Should be called for MSIX only
1265  * Unregisters Tx MSIX vector(s) from the kernel
1266  */
1267 static void
1268 bnad_tx_msix_unregister(struct bnad *bnad, struct bnad_tx_info *tx_info,
1269                         int num_txqs)
1270 {
1271         int i;
1272         int vector_num;
1273
1274         for (i = 0; i < num_txqs; i++) {
1275                 if (tx_info->tcb[i] == NULL)
1276                         continue;
1277
1278                 vector_num = tx_info->tcb[i]->intr_vector;
1279                 free_irq(bnad->msix_table[vector_num].vector, tx_info->tcb[i]);
1280         }
1281 }
1282
1283 /**
1284  * NOTE: Should be called for MSIX only
1285  * Registers Tx MSIX vector(s) and ISR(s), cookie with the kernel
1286  */
1287 static int
1288 bnad_tx_msix_register(struct bnad *bnad, struct bnad_tx_info *tx_info,
1289                         u32 tx_id, int num_txqs)
1290 {
1291         int i;
1292         int err;
1293         int vector_num;
1294
1295         for (i = 0; i < num_txqs; i++) {
1296                 vector_num = tx_info->tcb[i]->intr_vector;
1297                 sprintf(tx_info->tcb[i]->name, "%s TXQ %d", bnad->netdev->name,
1298                                 tx_id + tx_info->tcb[i]->id);
1299                 err = request_irq(bnad->msix_table[vector_num].vector,
1300                                   (irq_handler_t)bnad_msix_tx, 0,
1301                                   tx_info->tcb[i]->name,
1302                                   tx_info->tcb[i]);
1303                 if (err)
1304                         goto err_return;
1305         }
1306
1307         return 0;
1308
1309 err_return:
1310         if (i > 0)
1311                 bnad_tx_msix_unregister(bnad, tx_info, i);
1312         return -1;
1313 }
1314
1315 /**
1316  * NOTE: Should be called for MSIX only
1317  * Unregisters Rx MSIX vector(s) from the kernel
1318  */
1319 static void
1320 bnad_rx_msix_unregister(struct bnad *bnad, struct bnad_rx_info *rx_info,
1321                         int num_rxps)
1322 {
1323         int i;
1324         int vector_num;
1325
1326         for (i = 0; i < num_rxps; i++) {
1327                 if (rx_info->rx_ctrl[i].ccb == NULL)
1328                         continue;
1329
1330                 vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
1331                 free_irq(bnad->msix_table[vector_num].vector,
1332                          rx_info->rx_ctrl[i].ccb);
1333         }
1334 }
1335
1336 /**
1337  * NOTE: Should be called for MSIX only
1338  * Registers Rx MSIX vector(s) and ISR(s), cookie with the kernel
1339  */
1340 static int
1341 bnad_rx_msix_register(struct bnad *bnad, struct bnad_rx_info *rx_info,
1342                         u32 rx_id, int num_rxps)
1343 {
1344         int i;
1345         int err;
1346         int vector_num;
1347
1348         for (i = 0; i < num_rxps; i++) {
1349                 vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
1350                 sprintf(rx_info->rx_ctrl[i].ccb->name, "%s CQ %d",
1351                         bnad->netdev->name,
1352                         rx_id + rx_info->rx_ctrl[i].ccb->id);
1353                 err = request_irq(bnad->msix_table[vector_num].vector,
1354                                   (irq_handler_t)bnad_msix_rx, 0,
1355                                   rx_info->rx_ctrl[i].ccb->name,
1356                                   rx_info->rx_ctrl[i].ccb);
1357                 if (err)
1358                         goto err_return;
1359         }
1360
1361         return 0;
1362
1363 err_return:
1364         if (i > 0)
1365                 bnad_rx_msix_unregister(bnad, rx_info, i);
1366         return -1;
1367 }
1368
1369 /* Free Tx object Resources */
1370 static void
1371 bnad_tx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
1372 {
1373         int i;
1374
1375         for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
1376                 if (res_info[i].res_type == BNA_RES_T_MEM)
1377                         bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
1378                 else if (res_info[i].res_type == BNA_RES_T_INTR)
1379                         bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
1380         }
1381 }
1382
1383 /* Allocates memory and interrupt resources for Tx object */
1384 static int
1385 bnad_tx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
1386                   u32 tx_id)
1387 {
1388         int i, err = 0;
1389
1390         for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
1391                 if (res_info[i].res_type == BNA_RES_T_MEM)
1392                         err = bnad_mem_alloc(bnad,
1393                                         &res_info[i].res_u.mem_info);
1394                 else if (res_info[i].res_type == BNA_RES_T_INTR)
1395                         err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_TX, tx_id,
1396                                         &res_info[i].res_u.intr_info);
1397                 if (err)
1398                         goto err_return;
1399         }
1400         return 0;
1401
1402 err_return:
1403         bnad_tx_res_free(bnad, res_info);
1404         return err;
1405 }
1406
1407 /* Free Rx object Resources */
1408 static void
1409 bnad_rx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
1410 {
1411         int i;
1412
1413         for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
1414                 if (res_info[i].res_type == BNA_RES_T_MEM)
1415                         bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
1416                 else if (res_info[i].res_type == BNA_RES_T_INTR)
1417                         bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
1418         }
1419 }
1420
1421 /* Allocates memory and interrupt resources for Rx object */
1422 static int
1423 bnad_rx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
1424                   uint rx_id)
1425 {
1426         int i, err = 0;
1427
1428         /* All memory needs to be allocated before setup_ccbs */
1429         for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
1430                 if (res_info[i].res_type == BNA_RES_T_MEM)
1431                         err = bnad_mem_alloc(bnad,
1432                                         &res_info[i].res_u.mem_info);
1433                 else if (res_info[i].res_type == BNA_RES_T_INTR)
1434                         err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_RX, rx_id,
1435                                         &res_info[i].res_u.intr_info);
1436                 if (err)
1437                         goto err_return;
1438         }
1439         return 0;
1440
1441 err_return:
1442         bnad_rx_res_free(bnad, res_info);
1443         return err;
1444 }
1445
1446 /* Timer callbacks */
1447 /* a) IOC timer */
1448 static void
1449 bnad_ioc_timeout(unsigned long data)
1450 {
1451         struct bnad *bnad = (struct bnad *)data;
1452         unsigned long flags;
1453
1454         spin_lock_irqsave(&bnad->bna_lock, flags);
1455         bfa_nw_ioc_timeout((void *) &bnad->bna.ioceth.ioc);
1456         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1457 }
1458
1459 static void
1460 bnad_ioc_hb_check(unsigned long data)
1461 {
1462         struct bnad *bnad = (struct bnad *)data;
1463         unsigned long flags;
1464
1465         spin_lock_irqsave(&bnad->bna_lock, flags);
1466         bfa_nw_ioc_hb_check((void *) &bnad->bna.ioceth.ioc);
1467         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1468 }
1469
1470 static void
1471 bnad_iocpf_timeout(unsigned long data)
1472 {
1473         struct bnad *bnad = (struct bnad *)data;
1474         unsigned long flags;
1475
1476         spin_lock_irqsave(&bnad->bna_lock, flags);
1477         bfa_nw_iocpf_timeout((void *) &bnad->bna.ioceth.ioc);
1478         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1479 }
1480
1481 static void
1482 bnad_iocpf_sem_timeout(unsigned long data)
1483 {
1484         struct bnad *bnad = (struct bnad *)data;
1485         unsigned long flags;
1486
1487         spin_lock_irqsave(&bnad->bna_lock, flags);
1488         bfa_nw_iocpf_sem_timeout((void *) &bnad->bna.ioceth.ioc);
1489         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1490 }
1491
1492 /*
1493  * All timer routines use bnad->bna_lock to protect against
1494  * the following race, which may occur in case of no locking:
1495  *      Time    CPU m   CPU n
1496  *      0       1 = test_bit
1497  *      1                       clear_bit
1498  *      2                       del_timer_sync
1499  *      3       mod_timer
1500  */
1501
1502 /* b) Dynamic Interrupt Moderation Timer */
1503 static void
1504 bnad_dim_timeout(unsigned long data)
1505 {
1506         struct bnad *bnad = (struct bnad *)data;
1507         struct bnad_rx_info *rx_info;
1508         struct bnad_rx_ctrl *rx_ctrl;
1509         int i, j;
1510         unsigned long flags;
1511
1512         if (!netif_carrier_ok(bnad->netdev))
1513                 return;
1514
1515         spin_lock_irqsave(&bnad->bna_lock, flags);
1516         for (i = 0; i < bnad->num_rx; i++) {
1517                 rx_info = &bnad->rx_info[i];
1518                 if (!rx_info->rx)
1519                         continue;
1520                 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
1521                         rx_ctrl = &rx_info->rx_ctrl[j];
1522                         if (!rx_ctrl->ccb)
1523                                 continue;
1524                         bna_rx_dim_update(rx_ctrl->ccb);
1525                 }
1526         }
1527
1528         /* Check for BNAD_RF_DIM_TIMER_RUNNING; this does not eliminate the race */
1529         if (test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags))
1530                 mod_timer(&bnad->dim_timer,
1531                           jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
1532         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1533 }
1534
1535 /* c)  Statistics Timer */
1536 static void
1537 bnad_stats_timeout(unsigned long data)
1538 {
1539         struct bnad *bnad = (struct bnad *)data;
1540         unsigned long flags;
1541
1542         if (!netif_running(bnad->netdev) ||
1543                 !test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1544                 return;
1545
1546         spin_lock_irqsave(&bnad->bna_lock, flags);
1547         bna_hw_stats_get(&bnad->bna);
1548         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1549 }
1550
1551 /*
1552  * Set up timer for DIM
1553  * Called with bnad->bna_lock held
1554  */
1555 void
1556 bnad_dim_timer_start(struct bnad *bnad)
1557 {
1558         if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
1559             !test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
1560                 setup_timer(&bnad->dim_timer, bnad_dim_timeout,
1561                             (unsigned long)bnad);
1562                 set_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
1563                 mod_timer(&bnad->dim_timer,
1564                           jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
1565         }
1566 }
1567
1568 /*
1569  * Set up timer for statistics
1570  * Called with mutex_lock(&bnad->conf_mutex) held
1571  */
1572 static void
1573 bnad_stats_timer_start(struct bnad *bnad)
1574 {
1575         unsigned long flags;
1576
1577         spin_lock_irqsave(&bnad->bna_lock, flags);
1578         if (!test_and_set_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags)) {
1579                 setup_timer(&bnad->stats_timer, bnad_stats_timeout,
1580                             (unsigned long)bnad);
1581                 mod_timer(&bnad->stats_timer,
1582                           jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
1583         }
1584         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1585 }
1586
1587 /*
1588  * Stops the stats timer
1589  * Called with mutex_lock(&bnad->conf_mutex) held
1590  */
1591 static void
1592 bnad_stats_timer_stop(struct bnad *bnad)
1593 {
1594         int to_del = 0;
1595         unsigned long flags;
1596
1597         spin_lock_irqsave(&bnad->bna_lock, flags);
1598         if (test_and_clear_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1599                 to_del = 1;
1600         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1601         if (to_del)
1602                 del_timer_sync(&bnad->stats_timer);
1603 }
1604
1605 /* Utilities */
1606
1607 static void
1608 bnad_netdev_mc_list_get(struct net_device *netdev, u8 *mc_list)
1609 {
1610         int i = 1; /* Index 0 has broadcast address */
1611         struct netdev_hw_addr *mc_addr;
1612
1613         netdev_for_each_mc_addr(mc_addr, netdev) {
1614                 memcpy(&mc_list[i * ETH_ALEN], &mc_addr->addr[0],
1615                                                         ETH_ALEN);
1616                 i++;
1617         }
1618 }
1619
1620 static int
1621 bnad_napi_poll_rx(struct napi_struct *napi, int budget)
1622 {
1623         struct bnad_rx_ctrl *rx_ctrl =
1624                 container_of(napi, struct bnad_rx_ctrl, napi);
1625         struct bnad *bnad = rx_ctrl->bnad;
1626         int rcvd = 0;
1627
1628         rx_ctrl->rx_poll_ctr++;
1629
1630         if (!netif_carrier_ok(bnad->netdev))
1631                 goto poll_exit;
1632
1633         rcvd = bnad_poll_cq(bnad, rx_ctrl->ccb, budget);
1634         if (rcvd >= budget)
1635                 return rcvd;
1636
1637 poll_exit:
1638         napi_complete(napi);
1639
1640         rx_ctrl->rx_complete++;
1641
1642         if (rx_ctrl->ccb)
1643                 bnad_enable_rx_irq_unsafe(rx_ctrl->ccb);
1644
1645         return rcvd;
1646 }
1647
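     /*
      * BNAD_NAPI_POLL_QUOTA is the NAPI weight passed to netif_napi_add();
      * each poll invocation processes at most this many Rx completions
      * before yielding back to the NAPI scheduler.
      */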
1648 #define BNAD_NAPI_POLL_QUOTA            64
1649 static void
1650 bnad_napi_init(struct bnad *bnad, u32 rx_id)
1651 {
1652         struct bnad_rx_ctrl *rx_ctrl;
1653         int i;
1654
1655         /* Initialize NAPI */
1656         for (i = 0; i < bnad->num_rxp_per_rx; i++) {
1657                 rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i];
1658                 netif_napi_add(bnad->netdev, &rx_ctrl->napi,
1659                                bnad_napi_poll_rx, BNAD_NAPI_POLL_QUOTA);
1660         }
1661 }
1662
1663 static void
1664 bnad_napi_enable(struct bnad *bnad, u32 rx_id)
1665 {
1666         struct bnad_rx_ctrl *rx_ctrl;
1667         int i;
1668
1669         /* Enable NAPI */
1670         for (i = 0; i < bnad->num_rxp_per_rx; i++) {
1671                 rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i];
1672
1673                 napi_enable(&rx_ctrl->napi);
1674         }
1675 }
1676
1677 static void
1678 bnad_napi_disable(struct bnad *bnad, u32 rx_id)
1679 {
1680         int i;
1681
1682         /* First disable and then clean up */
1683         for (i = 0; i < bnad->num_rxp_per_rx; i++) {
1684                 napi_disable(&bnad->rx_info[rx_id].rx_ctrl[i].napi);
1685                 netif_napi_del(&bnad->rx_info[rx_id].rx_ctrl[i].napi);
1686         }
1687 }
1688
1689 /* Should be called with conf_lock held */
1690 void
1691 bnad_cleanup_tx(struct bnad *bnad, u32 tx_id)
1692 {
1693         struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
1694         struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
1695         unsigned long flags;
1696
1697         if (!tx_info->tx)
1698                 return;
1699
1700         init_completion(&bnad->bnad_completions.tx_comp);
1701         spin_lock_irqsave(&bnad->bna_lock, flags);
1702         bna_tx_disable(tx_info->tx, BNA_HARD_CLEANUP, bnad_cb_tx_disabled);
1703         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1704         wait_for_completion(&bnad->bnad_completions.tx_comp);
1705
1706         if (tx_info->tcb[0]->intr_type == BNA_INTR_T_MSIX)
1707                 bnad_tx_msix_unregister(bnad, tx_info,
1708                         bnad->num_txq_per_tx);
1709
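             /*
              * The Tx free tasklet is per-adapter; kill it only when the
              * default Tx (id 0) is torn down.
              */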
1710         if (0 == tx_id)
1711                 tasklet_kill(&bnad->tx_free_tasklet);
1712
1713         spin_lock_irqsave(&bnad->bna_lock, flags);
1714         bna_tx_destroy(tx_info->tx);
1715         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1716
1717         tx_info->tx = NULL;
1718         tx_info->tx_id = 0;
1719
1720         bnad_tx_res_free(bnad, res_info);
1721 }
1722
1723 /* Should be called with conf_lock held */
1724 int
1725 bnad_setup_tx(struct bnad *bnad, u32 tx_id)
1726 {
1727         int err;
1728         struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
1729         struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
1730         struct bna_intr_info *intr_info =
1731                         &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
1732         struct bna_tx_config *tx_config = &bnad->tx_config[tx_id];
1733         struct bna_tx_event_cbfn tx_cbfn;
1734         struct bna_tx *tx;
1735         unsigned long flags;
1736
1737         tx_info->tx_id = tx_id;
1738
1739         /* Initialize the Tx object configuration */
1740         tx_config->num_txq = bnad->num_txq_per_tx;
1741         tx_config->txq_depth = bnad->txq_depth;
1742         tx_config->tx_type = BNA_TX_T_REGULAR;
1743         tx_config->coalescing_timeo = bnad->tx_coalescing_timeo;
1744
1745         /* Initialize the tx event handlers */
1746         tx_cbfn.tcb_setup_cbfn = bnad_cb_tcb_setup;
1747         tx_cbfn.tcb_destroy_cbfn = bnad_cb_tcb_destroy;
1748         tx_cbfn.tx_stall_cbfn = bnad_cb_tx_stall;
1749         tx_cbfn.tx_resume_cbfn = bnad_cb_tx_resume;
1750         tx_cbfn.tx_cleanup_cbfn = bnad_cb_tx_cleanup;
1751
1752         /* Get BNA's resource requirement for one tx object */
1753         spin_lock_irqsave(&bnad->bna_lock, flags);
1754         bna_tx_res_req(bnad->num_txq_per_tx,
1755                 bnad->txq_depth, res_info);
1756         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1757
1758         /* Fill Unmap Q memory requirements */
1759         BNAD_FILL_UNMAPQ_MEM_REQ(
1760                         &res_info[BNA_TX_RES_MEM_T_UNMAPQ],
1761                         bnad->num_txq_per_tx,
1762                         BNAD_TX_UNMAPQ_DEPTH);
1763
1764         /* Allocate resources */
1765         err = bnad_tx_res_alloc(bnad, res_info, tx_id);
1766         if (err)
1767                 return err;
1768
1769         /* Ask BNA to create one Tx object, supplying required resources */
1770         spin_lock_irqsave(&bnad->bna_lock, flags);
1771         tx = bna_tx_create(&bnad->bna, bnad, tx_config, &tx_cbfn, res_info,
1772                         tx_info);
1773         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1774         if (!tx)
1775                 goto err_return;
1776         tx_info->tx = tx;
1777
1778         /* Register ISR for the Tx object */
1779         if (intr_info->intr_type == BNA_INTR_T_MSIX) {
1780                 err = bnad_tx_msix_register(bnad, tx_info,
1781                         tx_id, bnad->num_txq_per_tx);
1782                 if (err)
1783                         goto err_return;
1784         }
1785
1786         spin_lock_irqsave(&bnad->bna_lock, flags);
1787         bna_tx_enable(tx);
1788         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1789
1790         return 0;
1791
1792 err_return:
1793         bnad_tx_res_free(bnad, res_info);
1794         return err;
1795 }
1796
1797 /* Setup the rx config for bna_rx_create */
1798 /* bnad decides the configuration */
1799 static void
1800 bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config)
1801 {
1802         rx_config->rx_type = BNA_RX_T_REGULAR;
1803         rx_config->num_paths = bnad->num_rxp_per_rx;
1804         rx_config->coalescing_timeo = bnad->rx_coalescing_timeo;
1805
1806         if (bnad->num_rxp_per_rx > 1) {
1807                 rx_config->rss_status = BNA_STATUS_T_ENABLED;
1808                 rx_config->rss_config.hash_type =
1809                                 (BFI_ENET_RSS_IPV6 |
1810                                  BFI_ENET_RSS_IPV6_TCP |
1811                                  BFI_ENET_RSS_IPV4 |
1812                                  BFI_ENET_RSS_IPV4_TCP);
1813                 rx_config->rss_config.hash_mask =
1814                                 bnad->num_rxp_per_rx - 1;
1815                 get_random_bytes(rx_config->rss_config.toeplitz_hash_key,
1816                         sizeof(rx_config->rss_config.toeplitz_hash_key));
1817         } else {
1818                 rx_config->rss_status = BNA_STATUS_T_DISABLED;
1819                 memset(&rx_config->rss_config, 0,
1820                        sizeof(rx_config->rss_config));
1821         }
1822         rx_config->rxp_type = BNA_RXP_SLR;
1823         rx_config->q_depth = bnad->rxq_depth;
1824
1825         rx_config->small_buff_size = BFI_SMALL_RXBUF_SIZE;
1826
1827         rx_config->vlan_strip_status = BNA_STATUS_T_ENABLED;
1828 }
1829
1830 static void
1831 bnad_rx_ctrl_init(struct bnad *bnad, u32 rx_id)
1832 {
1833         struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
1834         int i;
1835
1836         for (i = 0; i < bnad->num_rxp_per_rx; i++)
1837                 rx_info->rx_ctrl[i].bnad = bnad;
1838 }
1839
1840 /* Called with mutex_lock(&bnad->conf_mutex) held */
1841 void
1842 bnad_cleanup_rx(struct bnad *bnad, u32 rx_id)
1843 {
1844         struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
1845         struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
1846         struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
1847         unsigned long flags;
1848         int to_del = 0;
1849
1850         if (!rx_info->rx)
1851                 return;
1852
1853         if (0 == rx_id) {
1854                 spin_lock_irqsave(&bnad->bna_lock, flags);
1855                 if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
1856                     test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
1857                         clear_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
1858                         to_del = 1;
1859                 }
1860                 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1861                 if (to_del)
1862                         del_timer_sync(&bnad->dim_timer);
1863         }
1864
1865         init_completion(&bnad->bnad_completions.rx_comp);
1866         spin_lock_irqsave(&bnad->bna_lock, flags);
1867         bna_rx_disable(rx_info->rx, BNA_HARD_CLEANUP, bnad_cb_rx_disabled);
1868         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1869         wait_for_completion(&bnad->bnad_completions.rx_comp);
1870
1871         if (rx_info->rx_ctrl[0].ccb->intr_type == BNA_INTR_T_MSIX)
1872                 bnad_rx_msix_unregister(bnad, rx_info, rx_config->num_paths);
1873
1874         bnad_napi_disable(bnad, rx_id);
1875
1876         spin_lock_irqsave(&bnad->bna_lock, flags);
1877         bna_rx_destroy(rx_info->rx);
1878         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1879
1880         rx_info->rx = NULL;
1881         rx_info->rx_id = 0;
1882
1883         bnad_rx_res_free(bnad, res_info);
1884 }
1885
1886 /* Called with mutex_lock(&bnad->conf_mutex) held */
1887 int
1888 bnad_setup_rx(struct bnad *bnad, u32 rx_id)
1889 {
1890         int err;
1891         struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
1892         struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
1893         struct bna_intr_info *intr_info =
1894                         &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
1895         struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
1896         struct bna_rx_event_cbfn rx_cbfn;
1897         struct bna_rx *rx;
1898         unsigned long flags;
1899
1900         rx_info->rx_id = rx_id;
1901
1902         /* Initialize the Rx object configuration */
1903         bnad_init_rx_config(bnad, rx_config);
1904
1905         /* Initialize the Rx event handlers */
1906         rx_cbfn.rcb_setup_cbfn = bnad_cb_rcb_setup;
1907         rx_cbfn.rcb_destroy_cbfn = bnad_cb_rcb_destroy;
1908         rx_cbfn.ccb_setup_cbfn = bnad_cb_ccb_setup;
1909         rx_cbfn.ccb_destroy_cbfn = bnad_cb_ccb_destroy;
1910         rx_cbfn.rx_cleanup_cbfn = bnad_cb_rx_cleanup;
1911         rx_cbfn.rx_post_cbfn = bnad_cb_rx_post;
1912
1913         /* Get BNA's resource requirement for one Rx object */
1914         spin_lock_irqsave(&bnad->bna_lock, flags);
1915         bna_rx_res_req(rx_config, res_info);
1916         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1917
1918         /* Fill Unmap Q memory requirements */
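             /*
              * Non-single RxP types (e.g. BNA_RXP_SLR) use two RxQs per
              * path (small and large buffers), hence twice the number of
              * unmap queues.
              */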
1919         BNAD_FILL_UNMAPQ_MEM_REQ(
1920                         &res_info[BNA_RX_RES_MEM_T_UNMAPQ],
1921                         rx_config->num_paths +
1922                         ((rx_config->rxp_type == BNA_RXP_SINGLE) ? 0 :
1923                                 rx_config->num_paths), BNAD_RX_UNMAPQ_DEPTH);
1924
1925         /* Allocate resource */
1926         err = bnad_rx_res_alloc(bnad, res_info, rx_id);
1927         if (err)
1928                 return err;
1929
1930         bnad_rx_ctrl_init(bnad, rx_id);
1931
1932         /* Ask BNA to create one Rx object, supplying required resources */
1933         spin_lock_irqsave(&bnad->bna_lock, flags);
1934         rx = bna_rx_create(&bnad->bna, bnad, rx_config, &rx_cbfn, res_info,
1935                         rx_info);
1936         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1937         if (!rx) {
1938                 err = -ENOMEM;
1939                 goto err_return;
1940         }
1941         rx_info->rx = rx;
1942
1943         /*
1944          * Init NAPI, so that the state is set to NAPI_STATE_SCHED
1945          * and the IRQ handler cannot schedule NAPI at this point.
1946          */
1947         bnad_napi_init(bnad, rx_id);
1948
1949         /* Register ISR for the Rx object */
1950         if (intr_info->intr_type == BNA_INTR_T_MSIX) {
1951                 err = bnad_rx_msix_register(bnad, rx_info, rx_id,
1952                                                 rx_config->num_paths);
1953                 if (err)
1954                         goto err_return;
1955         }
1956
1957         spin_lock_irqsave(&bnad->bna_lock, flags);
1958         if (0 == rx_id) {
1959                 /* Set up Dynamic Interrupt Moderation Vector */
1960                 if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED)
1961                         bna_rx_dim_reconfig(&bnad->bna, bna_napi_dim_vector);
1962
1963                 /* Enable VLAN filtering only on the default Rx */
1964                 bna_rx_vlanfilter_enable(rx);
1965
1966                 /* Start the DIM timer */
1967                 bnad_dim_timer_start(bnad);
1968         }
1969
1970         bna_rx_enable(rx);
1971         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1972
1973         /* Enable scheduling of NAPI */
1974         bnad_napi_enable(bnad, rx_id);
1975
1976         return 0;
1977
1978 err_return:
1979         bnad_cleanup_rx(bnad, rx_id);
1980         return err;
1981 }
1982
1983 /* Called with conf_lock & bnad->bna_lock held */
1984 void
1985 bnad_tx_coalescing_timeo_set(struct bnad *bnad)
1986 {
1987         struct bnad_tx_info *tx_info;
1988
1989         tx_info = &bnad->tx_info[0];
1990         if (!tx_info->tx)
1991                 return;
1992
1993         bna_tx_coalescing_timeo_set(tx_info->tx, bnad->tx_coalescing_timeo);
1994 }
1995
1996 /* Called with conf_lock & bnad->bna_lock held */
1997 void
1998 bnad_rx_coalescing_timeo_set(struct bnad *bnad)
1999 {
2000         struct bnad_rx_info *rx_info;
2001         int     i;
2002
2003         for (i = 0; i < bnad->num_rx; i++) {
2004                 rx_info = &bnad->rx_info[i];
2005                 if (!rx_info->rx)
2006                         continue;
2007                 bna_rx_coalescing_timeo_set(rx_info->rx,
2008                                 bnad->rx_coalescing_timeo);
2009         }
2010 }
2011
2012 /*
2013  * Called with bnad->bna_lock held
2014  */
2015 int
2016 bnad_mac_addr_set_locked(struct bnad *bnad, u8 *mac_addr)
2017 {
2018         int ret;
2019
2020         if (!is_valid_ether_addr(mac_addr))
2021                 return -EADDRNOTAVAIL;
2022
2023         /* If datapath is down, pretend everything went through */
2024         if (!bnad->rx_info[0].rx)
2025                 return 0;
2026
2027         ret = bna_rx_ucast_set(bnad->rx_info[0].rx, mac_addr, NULL);
2028         if (ret != BNA_CB_SUCCESS)
2029                 return -EADDRNOTAVAIL;
2030
2031         return 0;
2032 }
2033
2034 /* Should be called with conf_lock held */
2035 int
2036 bnad_enable_default_bcast(struct bnad *bnad)
2037 {
2038         struct bnad_rx_info *rx_info = &bnad->rx_info[0];
2039         int ret;
2040         unsigned long flags;
2041
2042         init_completion(&bnad->bnad_completions.mcast_comp);
2043
2044         spin_lock_irqsave(&bnad->bna_lock, flags);
2045         ret = bna_rx_mcast_add(rx_info->rx, (u8 *)bnad_bcast_addr,
2046                                 bnad_cb_rx_mcast_add);
2047         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2048
2049         if (ret == BNA_CB_SUCCESS)
2050                 wait_for_completion(&bnad->bnad_completions.mcast_comp);
2051         else
2052                 return -ENODEV;
2053
2054         if (bnad->bnad_completions.mcast_comp_status != BNA_CB_SUCCESS)
2055                 return -ENODEV;
2056
2057         return 0;
2058 }
2059
2060 /* Called with mutex_lock(&bnad->conf_mutex) held */
2061 void
2062 bnad_restore_vlans(struct bnad *bnad, u32 rx_id)
2063 {
2064         u16 vid;
2065         unsigned long flags;
2066
2067         for_each_set_bit(vid, bnad->active_vlans, VLAN_N_VID) {
2068                 spin_lock_irqsave(&bnad->bna_lock, flags);
2069                 bna_rx_vlan_add(bnad->rx_info[rx_id].rx, vid);
2070                 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2071         }
2072 }
2073
2074 /* Statistics utilities */
2075 void
2076 bnad_netdev_qstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
2077 {
2078         int i, j;
2079
2080         for (i = 0; i < bnad->num_rx; i++) {
2081                 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
2082                         if (bnad->rx_info[i].rx_ctrl[j].ccb) {
2083                                 stats->rx_packets += bnad->rx_info[i].
2084                                 rx_ctrl[j].ccb->rcb[0]->rxq->rx_packets;
2085                                 stats->rx_bytes += bnad->rx_info[i].
2086                                         rx_ctrl[j].ccb->rcb[0]->rxq->rx_bytes;
2087                                 if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] &&
2088                                         bnad->rx_info[i].rx_ctrl[j].ccb->
2089                                         rcb[1]->rxq) {
2090                                         stats->rx_packets +=
2091                                                 bnad->rx_info[i].rx_ctrl[j].
2092                                                 ccb->rcb[1]->rxq->rx_packets;
2093                                         stats->rx_bytes +=
2094                                                 bnad->rx_info[i].rx_ctrl[j].
2095                                                 ccb->rcb[1]->rxq->rx_bytes;
2096                                 }
2097                         }
2098                 }
2099         }
2100         for (i = 0; i < bnad->num_tx; i++) {
2101                 for (j = 0; j < bnad->num_txq_per_tx; j++) {
2102                         if (bnad->tx_info[i].tcb[j]) {
2103                                 stats->tx_packets +=
2104                                 bnad->tx_info[i].tcb[j]->txq->tx_packets;
2105                                 stats->tx_bytes +=
2106                                         bnad->tx_info[i].tcb[j]->txq->tx_bytes;
2107                         }
2108                 }
2109         }
2110 }
2111
2112 /*
2113  * Must be called with the bna_lock held.
2114  */
2115 void
2116 bnad_netdev_hwstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
2117 {
2118         struct bfi_enet_stats_mac *mac_stats;
2119         u32 bmap;
2120         int i;
2121
2122         mac_stats = &bnad->stats.bna_stats->hw_stats.mac_stats;
2123         stats->rx_errors =
2124                 mac_stats->rx_fcs_error + mac_stats->rx_alignment_error +
2125                 mac_stats->rx_frame_length_error + mac_stats->rx_code_error +
2126                 mac_stats->rx_undersize;
2127         stats->tx_errors = mac_stats->tx_fcs_error +
2128                                         mac_stats->tx_undersize;
2129         stats->rx_dropped = mac_stats->rx_drop;
2130         stats->tx_dropped = mac_stats->tx_drop;
2131         stats->multicast = mac_stats->rx_multicast;
2132         stats->collisions = mac_stats->tx_total_collision;
2133
2134         stats->rx_length_errors = mac_stats->rx_frame_length_error;
2135
2136         /* receive ring buffer overflow  ?? */
2137
2138         stats->rx_crc_errors = mac_stats->rx_fcs_error;
2139         stats->rx_frame_errors = mac_stats->rx_alignment_error;
2140         /* receiver FIFO overrun */
2141         bmap = bna_rx_rid_mask(&bnad->bna);
2142         for (i = 0; bmap; i++) {
2143                 if (bmap & 1) {
2144                         stats->rx_fifo_errors +=
2145                                 bnad->stats.bna_stats->
2146                                         hw_stats.rxf_stats[i].frame_drops;
2147                         break;
2148                 }
2149                 bmap >>= 1;
2150         }
2151 }
2152
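     /*
      * Wait for any in-flight mailbox interrupt handler to finish,
      * e.g. on the teardown path from bnad_stop().
      */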
2153 static void
2154 bnad_mbox_irq_sync(struct bnad *bnad)
2155 {
2156         u32 irq;
2157         unsigned long flags;
2158
2159         spin_lock_irqsave(&bnad->bna_lock, flags);
2160         if (bnad->cfg_flags & BNAD_CF_MSIX)
2161                 irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector;
2162         else
2163                 irq = bnad->pcidev->irq;
2164         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2165
2166         synchronize_irq(irq);
2167 }
2168
2169 /* Utility used by bnad_start_xmit, for doing TSO */
2170 static int
2171 bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb)
2172 {
2173         int err;
2174
2175         if (skb_header_cloned(skb)) {
2176                 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2177                 if (err) {
2178                         BNAD_UPDATE_CTR(bnad, tso_err);
2179                         return err;
2180                 }
2181         }
2182
2183         /*
2184          * For TSO, the TCP checksum field is seeded with pseudo-header sum
2185          * excluding the length field.
2186          */
2187         if (skb->protocol == htons(ETH_P_IP)) {
2188                 struct iphdr *iph = ip_hdr(skb);
2189
2190                 /* Do we really need these? */
2191                 iph->tot_len = 0;
2192                 iph->check = 0;
2193
2194                 tcp_hdr(skb)->check =
2195                         ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
2196                                            IPPROTO_TCP, 0);
2197                 BNAD_UPDATE_CTR(bnad, tso4);
2198         } else {
2199                 struct ipv6hdr *ipv6h = ipv6_hdr(skb);
2200
2201                 ipv6h->payload_len = 0;
2202                 tcp_hdr(skb)->check =
2203                         ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, 0,
2204                                          IPPROTO_TCP, 0);
2205                 BNAD_UPDATE_CTR(bnad, tso6);
2206         }
2207
2208         return 0;
2209 }
2210
2211 /*
2212  * Initialize Q numbers depending on Rx Paths
2213  * Called with bnad->bna_lock held, because of cfg_flags
2214  * access.
2215  */
2216 static void
2217 bnad_q_num_init(struct bnad *bnad)
2218 {
2219         int rxps;
2220
2221         rxps = min((uint)num_online_cpus(),
2222                         (uint)(BNAD_MAX_RX * BNAD_MAX_RXP_PER_RX));
2223
2224         if (!(bnad->cfg_flags & BNAD_CF_MSIX))
2225                 rxps = 1;       /* INTx */
2226
2227         bnad->num_rx = 1;
2228         bnad->num_tx = 1;
2229         bnad->num_rxp_per_rx = rxps;
2230         bnad->num_txq_per_tx = BNAD_TXQ_NUM;
2231 }
2232
2233 /*
2234  * Adjusts the Q numbers, given a number of MSIX vectors.
2235  * Gives preference to RSS over Tx priority queues; in that case
2236  * only 1 Tx Q is used.
2237  * Called with bnad->bna_lock held because of cfg_flags access.
2238  */
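     /*
      * For example, with msix_vectors == 8, num_tx == 1 and
      * BNAD_MAILBOX_MSIX_VECTORS == 1 (illustrative values only), and with
      * MSIX enabled, num_rxp_per_rx becomes 8 - 1 - 1 = 6.
      */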
2239 static void
2240 bnad_q_num_adjust(struct bnad *bnad, int msix_vectors, int temp)
2241 {
2242         bnad->num_txq_per_tx = 1;
2243         if ((msix_vectors >= (bnad->num_tx * bnad->num_txq_per_tx)  +
2244              bnad_rxqs_per_cq + BNAD_MAILBOX_MSIX_VECTORS) &&
2245             (bnad->cfg_flags & BNAD_CF_MSIX)) {
2246                 bnad->num_rxp_per_rx = msix_vectors -
2247                         (bnad->num_tx * bnad->num_txq_per_tx) -
2248                         BNAD_MAILBOX_MSIX_VECTORS;
2249         } else
2250                 bnad->num_rxp_per_rx = 1;
2251 }
2252
2253 /* Enable / disable ioceth */
2254 static int
2255 bnad_ioceth_disable(struct bnad *bnad)
2256 {
2257         unsigned long flags;
2258         int err = 0;
2259
2260         spin_lock_irqsave(&bnad->bna_lock, flags);
2261         init_completion(&bnad->bnad_completions.ioc_comp);
2262         bna_ioceth_disable(&bnad->bna.ioceth, BNA_HARD_CLEANUP);
2263         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2264
2265         wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp,
2266                 msecs_to_jiffies(BNAD_IOCETH_TIMEOUT));
2267
2268         err = bnad->bnad_completions.ioc_comp_status;
2269         return err;
2270 }
2271
2272 static int
2273 bnad_ioceth_enable(struct bnad *bnad)
2274 {
2275         int err = 0;
2276         unsigned long flags;
2277
2278         spin_lock_irqsave(&bnad->bna_lock, flags);
2279         init_completion(&bnad->bnad_completions.ioc_comp);
2280         bnad->bnad_completions.ioc_comp_status = BNA_CB_WAITING;
2281         bna_ioceth_enable(&bnad->bna.ioceth);
2282         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2283
2284         wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp,
2285                 msecs_to_jiffies(BNAD_IOCETH_TIMEOUT));
2286
2287         err = bnad->bnad_completions.ioc_comp_status;
2288
2289         return err;
2290 }
2291
2292 /* Free BNA resources */
2293 static void
2294 bnad_res_free(struct bnad *bnad, struct bna_res_info *res_info,
2295                 u32 res_val_max)
2296 {
2297         int i;
2298
2299         for (i = 0; i < res_val_max; i++)
2300                 bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
2301 }
2302
2303 /* Allocates memory and interrupt resources for BNA */
2304 static int
2305 bnad_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
2306                 u32 res_val_max)
2307 {
2308         int i, err;
2309
2310         for (i = 0; i < res_val_max; i++) {
2311                 err = bnad_mem_alloc(bnad, &res_info[i].res_u.mem_info);
2312                 if (err)
2313                         goto err_return;
2314         }
2315         return 0;
2316
2317 err_return:
2318         bnad_res_free(bnad, res_info, res_val_max);
2319         return err;
2320 }
2321
2322 /* Interrupt enable / disable */
2323 static void
2324 bnad_enable_msix(struct bnad *bnad)
2325 {
2326         int i, ret;
2327         unsigned long flags;
2328
2329         spin_lock_irqsave(&bnad->bna_lock, flags);
2330         if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
2331                 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2332                 return;
2333         }
2334         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2335
2336         if (bnad->msix_table)
2337                 return;
2338
2339         bnad->msix_table =
2340                 kcalloc(bnad->msix_num, sizeof(struct msix_entry), GFP_KERNEL);
2341
2342         if (!bnad->msix_table)
2343                 goto intx_mode;
2344
2345         for (i = 0; i < bnad->msix_num; i++)
2346                 bnad->msix_table[i].entry = i;
2347
2348         ret = pci_enable_msix(bnad->pcidev, bnad->msix_table, bnad->msix_num);
2349         if (ret > 0) {
2350                 /* Not enough MSI-X vectors. */
2351                 pr_warn("BNA: %d MSI-X vectors allocated < %d requested\n",
2352                         ret, bnad->msix_num);
2353
2354                 spin_lock_irqsave(&bnad->bna_lock, flags);
2355                 /* ret = #of vectors that we got */
2356                 bnad_q_num_adjust(bnad, (ret - BNAD_MAILBOX_MSIX_VECTORS) / 2,
2357                         (ret - BNAD_MAILBOX_MSIX_VECTORS) / 2);
2358                 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2359
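                     /*
                      * Recompute the vector count needed by the reduced
                      * configuration: TxQs + RxPs + the mailbox vector.
                      */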
2360                 bnad->msix_num = BNAD_NUM_TXQ + BNAD_NUM_RXP +
2361                          BNAD_MAILBOX_MSIX_VECTORS;
2362
2363                 if (bnad->msix_num > ret)
2364                         goto intx_mode;
2365
2366                 /* Try once more with adjusted numbers */
2367                 /* If this fails, fall back to INTx */
2368                 ret = pci_enable_msix(bnad->pcidev, bnad->msix_table,
2369                                       bnad->msix_num);
2370                 if (ret)
2371                         goto intx_mode;
2372
2373         } else if (ret < 0)
2374                 goto intx_mode;
2375
2376         pci_intx(bnad->pcidev, 0);
2377
2378         return;
2379
2380 intx_mode:
2381         pr_warn("BNA: MSI-X enable failed - operating in INTx mode\n");
2382
2383         kfree(bnad->msix_table);
2384         bnad->msix_table = NULL;
2385         bnad->msix_num = 0;
2386         spin_lock_irqsave(&bnad->bna_lock, flags);
2387         bnad->cfg_flags &= ~BNAD_CF_MSIX;
2388         bnad_q_num_init(bnad);
2389         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2390 }
2391
2392 static void
2393 bnad_disable_msix(struct bnad *bnad)
2394 {
2395         u32 cfg_flags;
2396         unsigned long flags;
2397
2398         spin_lock_irqsave(&bnad->bna_lock, flags);
2399         cfg_flags = bnad->cfg_flags;
2400         if (bnad->cfg_flags & BNAD_CF_MSIX)
2401                 bnad->cfg_flags &= ~BNAD_CF_MSIX;
2402         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2403
2404         if (cfg_flags & BNAD_CF_MSIX) {
2405                 pci_disable_msix(bnad->pcidev);
2406                 kfree(bnad->msix_table);
2407                 bnad->msix_table = NULL;
2408         }
2409 }
2410
2411 /* Netdev entry points */
2412 static int
2413 bnad_open(struct net_device *netdev)
2414 {
2415         int err;
2416         struct bnad *bnad = netdev_priv(netdev);
2417         struct bna_pause_config pause_config;
2418         int mtu;
2419         unsigned long flags;
2420
2421         mutex_lock(&bnad->conf_mutex);
2422
2423         /* Tx */
2424         err = bnad_setup_tx(bnad, 0);
2425         if (err)
2426                 goto err_return;
2427
2428         /* Rx */
2429         err = bnad_setup_rx(bnad, 0);
2430         if (err)
2431                 goto cleanup_tx;
2432
2433         /* Port */
2434         pause_config.tx_pause = 0;
2435         pause_config.rx_pause = 0;
2436
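             /* On-wire frame size: L2 header + VLAN tag + MTU payload + FCS */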
2437         mtu = ETH_HLEN + VLAN_HLEN + bnad->netdev->mtu + ETH_FCS_LEN;
2438
2439         spin_lock_irqsave(&bnad->bna_lock, flags);
2440         bna_enet_mtu_set(&bnad->bna.enet, mtu, NULL);
2441         bna_enet_pause_config(&bnad->bna.enet, &pause_config, NULL);
2442         bna_enet_enable(&bnad->bna.enet);
2443         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2444
2445         /* Enable broadcast */
2446         bnad_enable_default_bcast(bnad);
2447
2448         /* Restore VLANs, if any */
2449         bnad_restore_vlans(bnad, 0);
2450
2451         /* Set the UCAST address */
2452         spin_lock_irqsave(&bnad->bna_lock, flags);
2453         bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
2454         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2455
2456         /* Start the stats timer */
2457         bnad_stats_timer_start(bnad);
2458
2459         mutex_unlock(&bnad->conf_mutex);
2460
2461         return 0;
2462
2463 cleanup_tx:
2464         bnad_cleanup_tx(bnad, 0);
2465
2466 err_return:
2467         mutex_unlock(&bnad->conf_mutex);
2468         return err;
2469 }
2470
2471 static int
2472 bnad_stop(struct net_device *netdev)
2473 {
2474         struct bnad *bnad = netdev_priv(netdev);
2475         unsigned long flags;
2476
2477         mutex_lock(&bnad->conf_mutex);
2478
2479         /* Stop the stats timer */
2480         bnad_stats_timer_stop(bnad);
2481
2482         init_completion(&bnad->bnad_completions.enet_comp);
2483
2484         spin_lock_irqsave(&bnad->bna_lock, flags);
2485         bna_enet_disable(&bnad->bna.enet, BNA_HARD_CLEANUP,
2486                         bnad_cb_enet_disabled);
2487         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2488
2489         wait_for_completion(&bnad->bnad_completions.enet_comp);
2490
2491         bnad_cleanup_tx(bnad, 0);
2492         bnad_cleanup_rx(bnad, 0);
2493
2494         /* Synchronize mailbox IRQ */
2495         bnad_mbox_irq_sync(bnad);
2496
2497         mutex_unlock(&bnad->conf_mutex);
2498
2499         return 0;
2500 }
2501
2502 /* TX */
2503 /*
2504  * bnad_start_xmit : Netdev entry point for Transmit
2505  *                   Called under lock held by net_device
2506  */
2507 static netdev_tx_t
2508 bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
2509 {
2510         struct bnad *bnad = netdev_priv(netdev);
2511         u32 txq_id = 0;
2512         struct bna_tcb *tcb = bnad->tx_info[0].tcb[txq_id];
2513
2514         u16             txq_prod, vlan_tag = 0;
2515         u32             unmap_prod, wis, wis_used, wi_range;
2516         u32             vectors, vect_id, i, acked;
2517         int                     err;
2518         unsigned int            len;
2519         u32                             gso_size;
2520
2521         struct bnad_unmap_q *unmap_q = tcb->unmap_q;
2522         dma_addr_t              dma_addr;
2523         struct bna_txq_entry *txqent;
2524         u16     flags;
2525
2526         if (unlikely(skb->len <= ETH_HLEN)) {
2527                 dev_kfree_skb(skb);
2528                 BNAD_UPDATE_CTR(bnad, tx_skb_too_short);
2529                 return NETDEV_TX_OK;
2530         }
2531         if (unlikely(skb_headlen(skb) > BFI_TX_MAX_DATA_PER_VECTOR)) {
2532                 dev_kfree_skb(skb);
2533                 BNAD_UPDATE_CTR(bnad, tx_skb_headlen_too_long);
2534                 return NETDEV_TX_OK;
2535         }
2536         if (unlikely(skb_headlen(skb) == 0)) {
2537                 dev_kfree_skb(skb);
2538                 BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero);
2539                 return NETDEV_TX_OK;
2540         }
2541
2542         /*
2543          * Takes care of the Tx that is scheduled between clearing the flag
2544          * and the netif_tx_stop_all_queues() call.
2545          */
2546         if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) {
2547                 dev_kfree_skb(skb);
2548                 BNAD_UPDATE_CTR(bnad, tx_skb_stopping);
2549                 return NETDEV_TX_OK;
2550         }
2551
2552         vectors = 1 + skb_shinfo(skb)->nr_frags;
2553         if (unlikely(vectors > BFI_TX_MAX_VECTORS_PER_PKT)) {
2554                 dev_kfree_skb(skb);
2555                 BNAD_UPDATE_CTR(bnad, tx_skb_max_vectors);
2556                 return NETDEV_TX_OK;
2557         }
2558         wis = BNA_TXQ_WI_NEEDED(vectors);       /* 4 vectors per work item */
2559         acked = 0;
2560         if (unlikely(wis > BNA_QE_FREE_CNT(tcb, tcb->q_depth) ||
2561                         vectors > BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth))) {
2562                 if ((u16) (*tcb->hw_consumer_index) !=
2563                     tcb->consumer_index &&
2564                     !test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
2565                         acked = bnad_free_txbufs(bnad, tcb);
2566                         if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
2567                                 bna_ib_ack(tcb->i_dbell, acked);
2568                         smp_mb__before_clear_bit();
2569                         clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
2570                 } else {
2571                         netif_stop_queue(netdev);
2572                         BNAD_UPDATE_CTR(bnad, netif_queue_stop);
2573                 }
2574
2575                 smp_mb();
2576                 /*
2577                  * Check again to deal with race condition between
2578                  * netif_stop_queue here, and netif_wake_queue in
2579                  * interrupt handler which is not inside netif tx lock.
2580                  */
2581                 if (likely
2582                     (wis > BNA_QE_FREE_CNT(tcb, tcb->q_depth) ||
2583                      vectors > BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth))) {
2584                         BNAD_UPDATE_CTR(bnad, netif_queue_stop);
2585                         return NETDEV_TX_BUSY;
2586                 } else {
2587                         netif_wake_queue(netdev);
2588                         BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
2589                 }
2590         }
2591
2592         unmap_prod = unmap_q->producer_index;
2593         flags = 0;
2594
2595         txq_prod = tcb->producer_index;
2596         BNA_TXQ_QPGE_PTR_GET(txq_prod, tcb->sw_qpt, txqent, wi_range);
2597         txqent->hdr.wi.reserved = 0;
2598         txqent->hdr.wi.num_vectors = vectors;
2599
2600         if (vlan_tx_tag_present(skb)) {
2601                 vlan_tag = (u16) vlan_tx_tag_get(skb);
2602                 flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
2603         }
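             /*
              * With CEE running, overwrite the priority bits (PCP, bits
              * 15:13) of the VLAN tag with the Tx queue priority.
              */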
2604         if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags)) {
2605                 vlan_tag =
2606                         (tcb->priority & 0x7) << 13 | (vlan_tag & 0x1fff);
2607                 flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
2608         }
2609
2610         txqent->hdr.wi.vlan_tag = htons(vlan_tag);
2611
2612         if (skb_is_gso(skb)) {
2613                 gso_size = skb_shinfo(skb)->gso_size;
2614
2615                 if (unlikely(gso_size > netdev->mtu)) {
2616                         dev_kfree_skb(skb);
2617                         BNAD_UPDATE_CTR(bnad, tx_skb_mss_too_long);
2618                         return NETDEV_TX_OK;
2619                 }
2620                 if (unlikely((gso_size + skb_transport_offset(skb) +
2621                         tcp_hdrlen(skb)) >= skb->len)) {
2622                         txqent->hdr.wi.opcode =
2623                                 __constant_htons(BNA_TXQ_WI_SEND);
2624                         txqent->hdr.wi.lso_mss = 0;
2625                         BNAD_UPDATE_CTR(bnad, tx_skb_tso_too_short);
2626                 } else {
2627                         txqent->hdr.wi.opcode =
2628                                 __constant_htons(BNA_TXQ_WI_SEND_LSO);
2629                         txqent->hdr.wi.lso_mss = htons(gso_size);
2630                 }
2631
2632                 err = bnad_tso_prepare(bnad, skb);
2633                 if (unlikely(err)) {
2634                         dev_kfree_skb(skb);
2635                         BNAD_UPDATE_CTR(bnad, tx_skb_tso_prepare);
2636                         return NETDEV_TX_OK;
2637                 }
2638                 flags |= (BNA_TXQ_WI_CF_IP_CKSUM | BNA_TXQ_WI_CF_TCP_CKSUM);
2639                 txqent->hdr.wi.l4_hdr_size_n_offset =
2640                         htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2641                               (tcp_hdrlen(skb) >> 2,
2642                                skb_transport_offset(skb)));
2643         } else {
2644                 txqent->hdr.wi.opcode = __constant_htons(BNA_TXQ_WI_SEND);
2645                 txqent->hdr.wi.lso_mss = 0;
2646
2647                 if (unlikely(skb->len > (netdev->mtu + ETH_HLEN))) {
2648                         dev_kfree_skb(skb);
2649                         BNAD_UPDATE_CTR(bnad, tx_skb_non_tso_too_long);
2650                         return NETDEV_TX_OK;
2651                 }
2652
2653                 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2654                         u8 proto = 0;
2655
2656                         if (skb->protocol == __constant_htons(ETH_P_IP))
2657                                 proto = ip_hdr(skb)->protocol;
2658                         else if (skb->protocol ==
2659                                  __constant_htons(ETH_P_IPV6)) {
2660                                 /* nexthdr may not be TCP immediately. */
2661                                 proto = ipv6_hdr(skb)->nexthdr;
2662                         }
2663                         if (proto == IPPROTO_TCP) {
2664                                 flags |= BNA_TXQ_WI_CF_TCP_CKSUM;
2665                                 txqent->hdr.wi.l4_hdr_size_n_offset =
2666                                         htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2667                                               (0, skb_transport_offset(skb)));
2668
2669                                 BNAD_UPDATE_CTR(bnad, tcpcsum_offload);
2670
2671                                 if (unlikely(skb_headlen(skb) <
2672                                 skb_transport_offset(skb) + tcp_hdrlen(skb))) {
2673                                         dev_kfree_skb(skb);
2674                                         BNAD_UPDATE_CTR(bnad, tx_skb_tcp_hdr);
2675                                         return NETDEV_TX_OK;
2676                                 }
2677
2678                         } else if (proto == IPPROTO_UDP) {
2679                                 flags |= BNA_TXQ_WI_CF_UDP_CKSUM;
2680                                 txqent->hdr.wi.l4_hdr_size_n_offset =
2681                                         htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2682                                               (0, skb_transport_offset(skb)));
2683
2684                                 BNAD_UPDATE_CTR(bnad, udpcsum_offload);
2685                                 if (unlikely(skb_headlen(skb) <
2686                                     skb_transport_offset(skb) +
2687                                     sizeof(struct udphdr))) {
2688                                         dev_kfree_skb(skb);
2689                                         BNAD_UPDATE_CTR(bnad, tx_skb_udp_hdr);
2690                                         return NETDEV_TX_OK;
2691                                 }
2692                         } else {
2693                                 dev_kfree_skb(skb);
2694                                 BNAD_UPDATE_CTR(bnad, tx_skb_csum_err);
2695                                 return NETDEV_TX_OK;
2696                         }
2697                 } else {
2698                         txqent->hdr.wi.l4_hdr_size_n_offset = 0;
2699                 }
2700         }
2701
2702         txqent->hdr.wi.flags = htons(flags);
2703
2704         txqent->hdr.wi.frame_length = htonl(skb->len);
2705
2706         unmap_q->unmap_array[unmap_prod].skb = skb;
2707         len = skb_headlen(skb);
2708         txqent->vector[0].length = htons(len);
2709         dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
2710                                   skb_headlen(skb), DMA_TO_DEVICE);
2711         dma_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
2712                            dma_addr);
2713
2714         BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[0].host_addr);
2715         BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
2716
2717         vect_id = 0;
2718         wis_used = 1;
2719
2720         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2721                 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
2722                 u16             size = frag->size;
2723
2724                 if (unlikely(size == 0)) {
2725                         unmap_prod = unmap_q->producer_index;
2726
2727                         unmap_prod = bnad_pci_unmap_skb(&bnad->pcidev->dev,
2728                                            unmap_q->unmap_array,
2729                                            unmap_prod, unmap_q->q_depth, skb,
2730                                            i);
2731                         dev_kfree_skb(skb);
2732                         BNAD_UPDATE_CTR(bnad, tx_skb_frag_zero);
2733                         return NETDEV_TX_OK;
2734                 }
2735
2736                 len += size;
2737
2738                 if (++vect_id == BFI_TX_MAX_VECTORS_PER_WI) {
2739                         vect_id = 0;
2740                         if (--wi_range)
2741                                 txqent++;
2742                         else {
2743                                 BNA_QE_INDX_ADD(txq_prod, wis_used,
2744                                                 tcb->q_depth);
2745                                 wis_used = 0;
2746                                 BNA_TXQ_QPGE_PTR_GET(txq_prod, tcb->sw_qpt,
2747                                                      txqent, wi_range);
2748                         }
2749                         wis_used++;
2750                         txqent->hdr.wi_ext.opcode =
2751                                 __constant_htons(BNA_TXQ_WI_EXTENSION);
2752                 }
2753
2754                 BUG_ON(size > BFI_TX_MAX_DATA_PER_VECTOR);
2755                 txqent->vector[vect_id].length = htons(size);
2756                 dma_addr = skb_frag_dma_map(&bnad->pcidev->dev, frag,
2757                                             0, size, DMA_TO_DEVICE);
2758                 dma_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
2759                                    dma_addr);
2760                 BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
2761                 BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
2762         }
2763
2764         if (unlikely(len != skb->len)) {
2765                 unmap_prod = unmap_q->producer_index;
2766
2767                 unmap_prod = bnad_pci_unmap_skb(&bnad->pcidev->dev,
2768                                 unmap_q->unmap_array, unmap_prod,
2769                                 unmap_q->q_depth, skb,
2770                                 skb_shinfo(skb)->nr_frags);
2771                 dev_kfree_skb(skb);
2772                 BNAD_UPDATE_CTR(bnad, tx_skb_len_mismatch);
2773                 return NETDEV_TX_OK;
2774         }
2775
2776         unmap_q->producer_index = unmap_prod;
2777         BNA_QE_INDX_ADD(txq_prod, wis_used, tcb->q_depth);
2778         tcb->producer_index = txq_prod;
2779
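             /*
              * Make the producer index update visible before checking
              * whether the Tx queue has been stopped underneath us.
              */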
2780         smp_mb();
2781
2782         if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
2783                 return NETDEV_TX_OK;
2784
2785         bna_txq_prod_indx_doorbell(tcb);
2786         smp_mb();
2787
2788         if ((u16) (*tcb->hw_consumer_index) != tcb->consumer_index)
2789                 tasklet_schedule(&bnad->tx_free_tasklet);
2790
2791         return NETDEV_TX_OK;
2792 }
2793
2794 /*
2795  * Uses spin_lock to synchronize reading of the stats structures, which
2796  * are written by BNA under the same lock.
2797  */
2798 static struct rtnl_link_stats64 *
2799 bnad_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
2800 {
2801         struct bnad *bnad = netdev_priv(netdev);
2802         unsigned long flags;
2803
2804         spin_lock_irqsave(&bnad->bna_lock, flags);
2805
2806         bnad_netdev_qstats_fill(bnad, stats);
2807         bnad_netdev_hwstats_fill(bnad, stats);
2808
2809         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2810
2811         return stats;
2812 }
2813
2814 void
2815 bnad_set_rx_mode(struct net_device *netdev)
2816 {
2817         struct bnad *bnad = netdev_priv(netdev);
2818         u32     new_mask, valid_mask;
2819         unsigned long flags;
2820
2821         spin_lock_irqsave(&bnad->bna_lock, flags);
2822
2823         new_mask = valid_mask = 0;
2824
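             /*
              * valid_mask selects which rx-mode bits to update;
              * new_mask carries their new values.
              */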
2825         if (netdev->flags & IFF_PROMISC) {
2826                 if (!(bnad->cfg_flags & BNAD_CF_PROMISC)) {
2827                         new_mask = BNAD_RXMODE_PROMISC_DEFAULT;
2828                         valid_mask = BNAD_RXMODE_PROMISC_DEFAULT;
2829                         bnad->cfg_flags |= BNAD_CF_PROMISC;
2830                 }
2831         } else {
2832                 if (bnad->cfg_flags & BNAD_CF_PROMISC) {
2833                         new_mask = ~BNAD_RXMODE_PROMISC_DEFAULT;
2834                         valid_mask = BNAD_RXMODE_PROMISC_DEFAULT;
2835                         bnad->cfg_flags &= ~BNAD_CF_PROMISC;
2836                 }
2837         }
2838
2839         if (netdev->flags & IFF_ALLMULTI) {
2840                 if (!(bnad->cfg_flags & BNAD_CF_ALLMULTI)) {
2841                         new_mask |= BNA_RXMODE_ALLMULTI;
2842                         valid_mask |= BNA_RXMODE_ALLMULTI;
2843                         bnad->cfg_flags |= BNAD_CF_ALLMULTI;
2844                 }
2845         } else {
2846                 if (bnad->cfg_flags & BNAD_CF_ALLMULTI) {
2847                         new_mask &= ~BNA_RXMODE_ALLMULTI;
2848                         valid_mask |= BNA_RXMODE_ALLMULTI;
2849                         bnad->cfg_flags &= ~BNAD_CF_ALLMULTI;
2850                 }
2851         }
2852
2853         if (bnad->rx_info[0].rx == NULL)
2854                 goto unlock;
2855
2856         bna_rx_mode_set(bnad->rx_info[0].rx, new_mask, valid_mask, NULL);
2857
2858         if (!netdev_mc_empty(netdev)) {
2859                 u8 *mcaddr_list;
2860                 int mc_count = netdev_mc_count(netdev);
2861
2862                 /* Index 0 holds the broadcast address */
2863                 mcaddr_list =
2864                         kzalloc((mc_count + 1) * ETH_ALEN,
2865                                 GFP_ATOMIC);
2866                 if (!mcaddr_list)
2867                         goto unlock;
2868
2869                 memcpy(&mcaddr_list[0], &bnad_bcast_addr[0], ETH_ALEN);
2870
2871                 /* Copy rest of the MC addresses */
2872                 bnad_netdev_mc_list_get(netdev, mcaddr_list);
2873
2874                 bna_rx_mcast_listset(bnad->rx_info[0].rx, mc_count + 1,
2875                                         mcaddr_list, NULL);
2876
2877                 /* Should we enable BNAD_CF_ALLMULTI for err != 0 ? */
2878                 kfree(mcaddr_list);
2879         }
2880 unlock:
2881         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2882 }
2883
2884 /*
2885  * bna_lock is used to sync writes to netdev->addr
2886  * conf_lock cannot be used since this call may be made
2887  * in a non-blocking context.
2888  */
2889 static int
2890 bnad_set_mac_address(struct net_device *netdev, void *mac_addr)
2891 {
2892         int err;
2893         struct bnad *bnad = netdev_priv(netdev);
2894         struct sockaddr *sa = (struct sockaddr *)mac_addr;
2895         unsigned long flags;
2896
2897         spin_lock_irqsave(&bnad->bna_lock, flags);
2898
2899         err = bnad_mac_addr_set_locked(bnad, sa->sa_data);
2900
2901         if (!err)
2902                 memcpy(netdev->dev_addr, sa->sa_data, netdev->addr_len);
2903
2904         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2905
2906         return err;
2907 }
2908
2909 static int
2910 bnad_mtu_set(struct bnad *bnad, int mtu)
2911 {
2912         unsigned long flags;
2913
2914         init_completion(&bnad->bnad_completions.mtu_comp);
2915
2916         spin_lock_irqsave(&bnad->bna_lock, flags);
2917         bna_enet_mtu_set(&bnad->bna.enet, mtu, bnad_cb_enet_mtu_set);
2918         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2919
2920         wait_for_completion(&bnad->bnad_completions.mtu_comp);
2921
2922         return bnad->bnad_completions.mtu_comp_status;
2923 }
2924
2925 static int
2926 bnad_change_mtu(struct net_device *netdev, int new_mtu)
2927 {
2928         int err, mtu = netdev->mtu;
2929         struct bnad *bnad = netdev_priv(netdev);
2930
2931         if (new_mtu + ETH_HLEN < ETH_ZLEN || new_mtu > BNAD_JUMBO_MTU)
2932                 return -EINVAL;
2933
2934         mutex_lock(&bnad->conf_mutex);
2935
2936         netdev->mtu = new_mtu;
2937
2938         mtu = ETH_HLEN + VLAN_HLEN + new_mtu + ETH_FCS_LEN;
2939         err = bnad_mtu_set(bnad, mtu);
2940         if (err)
2941                 err = -EBUSY;
2942
2943         mutex_unlock(&bnad->conf_mutex);
2944         return err;
2945 }
2946
2947 static void
2948 bnad_vlan_rx_add_vid(struct net_device *netdev,
2949                                  unsigned short vid)
2950 {
2951         struct bnad *bnad = netdev_priv(netdev);
2952         unsigned long flags;
2953
2954         if (!bnad->rx_info[0].rx)
2955                 return;
2956
2957         mutex_lock(&bnad->conf_mutex);
2958
2959         spin_lock_irqsave(&bnad->bna_lock, flags);
2960         bna_rx_vlan_add(bnad->rx_info[0].rx, vid);
2961         set_bit(vid, bnad->active_vlans);
2962         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2963
2964         mutex_unlock(&bnad->conf_mutex);
2965 }
2966
2967 static void
2968 bnad_vlan_rx_kill_vid(struct net_device *netdev,
2969                                   unsigned short vid)
2970 {
2971         struct bnad *bnad = netdev_priv(netdev);
2972         unsigned long flags;
2973
2974         if (!bnad->rx_info[0].rx)
2975                 return;
2976
2977         mutex_lock(&bnad->conf_mutex);
2978
2979         spin_lock_irqsave(&bnad->bna_lock, flags);
2980         clear_bit(vid, bnad->active_vlans);
2981         bna_rx_vlan_del(bnad->rx_info[0].rx, vid);
2982         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2983
2984         mutex_unlock(&bnad->conf_mutex);
2985 }
2986
2987 #ifdef CONFIG_NET_POLL_CONTROLLER
2988 static void
2989 bnad_netpoll(struct net_device *netdev)
2990 {
2991         struct bnad *bnad = netdev_priv(netdev);
2992         struct bnad_rx_info *rx_info;
2993         struct bnad_rx_ctrl *rx_ctrl;
2994         u32 curr_mask;
2995         int i, j;
2996
2997         if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
2998                 bna_intx_disable(&bnad->bna, curr_mask);
2999                 bnad_isr(bnad->pcidev->irq, netdev);
3000                 bna_intx_enable(&bnad->bna, curr_mask);
3001         } else {
3002                 /*
3003                  * Tx processing may happen in sending context, so no need
3004                  * to explicitly process completions here
3005                  */
3006
3007                 /* Rx processing */
3008                 for (i = 0; i < bnad->num_rx; i++) {
3009                         rx_info = &bnad->rx_info[i];
3010                         if (!rx_info->rx)
3011                                 continue;
3012                         for (j = 0; j < bnad->num_rxp_per_rx; j++) {
3013                                 rx_ctrl = &rx_info->rx_ctrl[j];
3014                                 if (rx_ctrl->ccb)
3015                                         bnad_netif_rx_schedule_poll(bnad,
3016                                                             rx_ctrl->ccb);
3017                         }
3018                 }
3019         }
3020 }
3021 #endif
3022
3023 static const struct net_device_ops bnad_netdev_ops = {
3024         .ndo_open               = bnad_open,
3025         .ndo_stop               = bnad_stop,
3026         .ndo_start_xmit         = bnad_start_xmit,
3027         .ndo_get_stats64        = bnad_get_stats64,
3028         .ndo_set_rx_mode        = bnad_set_rx_mode,
3029         .ndo_validate_addr      = eth_validate_addr,
3030         .ndo_set_mac_address    = bnad_set_mac_address,
3031         .ndo_change_mtu         = bnad_change_mtu,
3032         .ndo_vlan_rx_add_vid    = bnad_vlan_rx_add_vid,
3033         .ndo_vlan_rx_kill_vid   = bnad_vlan_rx_kill_vid,
3034 #ifdef CONFIG_NET_POLL_CONTROLLER
3035         .ndo_poll_controller    = bnad_netpoll
3036 #endif
3037 };
3038
3039 static void
3040 bnad_netdev_init(struct bnad *bnad, bool using_dac)
3041 {
3042         struct net_device *netdev = bnad->netdev;
3043
3044         netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
3045                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
3046                 NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_VLAN_TX;
3047
3048         netdev->vlan_features = NETIF_F_SG | NETIF_F_HIGHDMA |
3049                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
3050                 NETIF_F_TSO | NETIF_F_TSO6;
3051
3052         netdev->features |= netdev->hw_features |
3053                 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
3054
3055         if (using_dac)
3056                 netdev->features |= NETIF_F_HIGHDMA;
3057
3058         netdev->mem_start = bnad->mmio_start;
3059         netdev->mem_end = bnad->mmio_start + bnad->mmio_len - 1;
3060
3061         netdev->netdev_ops = &bnad_netdev_ops;
3062         bnad_set_ethtool_ops(netdev);
3063 }
3064
3065 /*
3066  * 1. Initialize the bnad structure
3067  * 2. Setup netdev pointer in pci_dev
3068  * 3. Initialize Tx free tasklet
3069  * 4. Initialize number of TxQs, CQs & MSI-X vectors
3070  */
3071 static int
3072 bnad_init(struct bnad *bnad,
3073           struct pci_dev *pdev, struct net_device *netdev)
3074 {
3075         unsigned long flags;
3076
3077         SET_NETDEV_DEV(netdev, &pdev->dev);
3078         pci_set_drvdata(pdev, netdev);
3079
3080         bnad->netdev = netdev;
3081         bnad->pcidev = pdev;
3082         bnad->mmio_start = pci_resource_start(pdev, 0);
3083         bnad->mmio_len = pci_resource_len(pdev, 0);
3084         bnad->bar0 = ioremap_nocache(bnad->mmio_start, bnad->mmio_len);
3085         if (!bnad->bar0) {
3086                 dev_err(&pdev->dev, "ioremap for bar0 failed\n");
3087                 pci_set_drvdata(pdev, NULL);
3088                 return -ENOMEM;
3089         }
3090         pr_info("bar0 mapped to %p, len %llu\n", bnad->bar0,
3091                (unsigned long long) bnad->mmio_len);
3092
3093         spin_lock_irqsave(&bnad->bna_lock, flags);
3094         if (!bnad_msix_disable)
3095                 bnad->cfg_flags = BNAD_CF_MSIX;
3096
3097         bnad->cfg_flags |= BNAD_CF_DIM_ENABLED;
3098
3099         bnad_q_num_init(bnad);
3100         spin_unlock_irqrestore(&bnad->bna_lock, flags);
3101
3102         bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx) +
3103                 (bnad->num_rx * bnad->num_rxp_per_rx) +
3104                          BNAD_MAILBOX_MSIX_VECTORS;
3105
3106         bnad->txq_depth = BNAD_TXQ_DEPTH;
3107         bnad->rxq_depth = BNAD_RXQ_DEPTH;
3108
3109         bnad->tx_coalescing_timeo = BFI_TX_COALESCING_TIMEO;
3110         bnad->rx_coalescing_timeo = BFI_RX_COALESCING_TIMEO;
3111
3112         tasklet_init(&bnad->tx_free_tasklet, bnad_tx_free_tasklet,
3113                      (unsigned long)bnad);
3114
3115         return 0;
3116 }
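
/*
 * Illustrative sketch only: how the MSI-X vector count computed in
 * bnad_init() above is composed -- one vector per TxQ, one per Rx path,
 * plus the mailbox vector(s).  The helper name is hypothetical and
 * nothing in this file calls it.
 */
static inline u32 bnad_msix_count_sketch(struct bnad *bnad)
{
        return (bnad->num_tx * bnad->num_txq_per_tx) +
               (bnad->num_rx * bnad->num_rxp_per_rx) +
               BNAD_MAILBOX_MSIX_VECTORS;
}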
3117
3118 /*
3119  * Must be called after bnad_pci_uninit()
3120  * so that iounmap() and pci_set_drvdata(NULL)
3121  * happens only after PCI uninitialization.
3122  */
3123 static void
3124 bnad_uninit(struct bnad *bnad)
3125 {
3126         if (bnad->bar0)
3127                 iounmap(bnad->bar0);
3128         pci_set_drvdata(bnad->pcidev, NULL);
3129 }
3130
3131 /*
3132  * Initialize locks
3133  *      a) Per-ioceth mutex used for serializing configuration
3134  *         changes from the OS interface
3135  *      b) Spin lock used to protect the bna state machine
3136  */
3137 static void
3138 bnad_lock_init(struct bnad *bnad)
3139 {
3140         spin_lock_init(&bnad->bna_lock);
3141         mutex_init(&bnad->conf_mutex);
3142 }
3143
3144 static void
3145 bnad_lock_uninit(struct bnad *bnad)
3146 {
3147         mutex_destroy(&bnad->conf_mutex);
3148 }
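
/*
 * Illustrative sketch only: the nesting of the two locks initialized above,
 * as used throughout this file.  conf_mutex serializes configuration
 * requests coming from the OS interface; bna_lock (IRQ-safe) protects the
 * bna state machine for the short hardware-programming sections.  The
 * helper name is hypothetical and nothing calls it.
 */
static inline void bnad_lock_pattern_sketch(struct bnad *bnad)
{
        unsigned long flags;

        mutex_lock(&bnad->conf_mutex);

        spin_lock_irqsave(&bnad->bna_lock, flags);
        /* ... touch bna state / program hardware here ... */
        spin_unlock_irqrestore(&bnad->bna_lock, flags);

        mutex_unlock(&bnad->conf_mutex);
}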
3149
3150 /* PCI Initialization */
3151 static int
3152 bnad_pci_init(struct bnad *bnad,
3153               struct pci_dev *pdev, bool *using_dac)
3154 {
3155         int err;
3156
3157         err = pci_enable_device(pdev);
3158         if (err)
3159                 return err;
3160         err = pci_request_regions(pdev, BNAD_NAME);
3161         if (err)
3162                 goto disable_device;
3163         if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
3164             !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
3165                 *using_dac = 1;
3166         } else {
3167                 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
3168                 if (err) {
3169                         err = dma_set_coherent_mask(&pdev->dev,
3170                                                     DMA_BIT_MASK(32));
3171                         if (err)
3172                                 goto release_regions;
3173                 }
3174                 *using_dac = 0;
3175         }
3176         pci_set_master(pdev);
3177         return 0;
3178
3179 release_regions:
3180         pci_release_regions(pdev);
3181 disable_device:
3182         pci_disable_device(pdev);
3183
3184         return err;
3185 }
3186
3187 static void
3188 bnad_pci_uninit(struct pci_dev *pdev)
3189 {
3190         pci_release_regions(pdev);
3191         pci_disable_device(pdev);
3192 }
3193
3194 static int __devinit
3195 bnad_pci_probe(struct pci_dev *pdev,
3196                 const struct pci_device_id *pcidev_id)
3197 {
3198         bool    using_dac;
3199         int     err;
3200         struct bnad *bnad;
3201         struct bna *bna;
3202         struct net_device *netdev;
3203         struct bfa_pcidev pcidev_info;
3204         unsigned long flags;
3205
3206         pr_info("bnad_pci_probe : (0x%p, 0x%p) PCI Func : (%d)\n",
3207                pdev, pcidev_id, PCI_FUNC(pdev->devfn));
3208
3209         mutex_lock(&bnad_fwimg_mutex);
3210         if (!cna_get_firmware_buf(pdev)) {
3211                 mutex_unlock(&bnad_fwimg_mutex);
3212                 pr_warn("Failed to load Firmware Image!\n");
3213                 return -ENODEV;
3214         }
3215         mutex_unlock(&bnad_fwimg_mutex);
3216
3217         /*
3218          * Allocates sizeof(struct net_device) + sizeof(struct bnad);
3219          * bnad = netdev_priv(netdev)
3220          */
3221         netdev = alloc_etherdev(sizeof(struct bnad));
3222         if (!netdev) {
3223                 dev_err(&pdev->dev, "netdev allocation failed\n");
3224                 err = -ENOMEM;
3225                 return err;
3226         }
3227         bnad = netdev_priv(netdev);
3228
3229         bnad_lock_init(bnad);
3230
3231         mutex_lock(&bnad->conf_mutex);
3232         /*
3233          * PCI initialization
3234          *      Output : using_dac = 1 for 64 bit DMA
3235          *                         = 0 for 32 bit DMA
3236          */
3237         err = bnad_pci_init(bnad, pdev, &using_dac);
3238         if (err)
3239                 goto unlock_mutex;
3240
3241         /*
3242          * Initialize bnad structure
3243          * Setup relation between pci_dev & netdev
3244          * Init Tx free tasklet
3245          */
3246         err = bnad_init(bnad, pdev, netdev);
3247         if (err)
3248                 goto pci_uninit;
3249
3250         /* Initialize netdev structure, set up ethtool ops */
3251         bnad_netdev_init(bnad, using_dac);
3252
3253         /* Set link to down state */
3254         netif_carrier_off(netdev);
3255
3256         /* Get resource requirement from bna */
3257         spin_lock_irqsave(&bnad->bna_lock, flags);
3258         bna_res_req(&bnad->res_info[0]);
3259         spin_unlock_irqrestore(&bnad->bna_lock, flags);
3260
3261         /* Allocate resources from bna */
3262         err = bnad_res_alloc(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
3263         if (err)
3264                 goto drv_uninit;
3265
3266         bna = &bnad->bna;
3267
3268         /* Setup pcidev_info for bna_init() */
3269         pcidev_info.pci_slot = PCI_SLOT(bnad->pcidev->devfn);
3270         pcidev_info.pci_func = PCI_FUNC(bnad->pcidev->devfn);
3271         pcidev_info.device_id = bnad->pcidev->device;
3272         pcidev_info.pci_bar_kva = bnad->bar0;
3273
3274         spin_lock_irqsave(&bnad->bna_lock, flags);
3275         bna_init(bna, bnad, &pcidev_info, &bnad->res_info[0]);
3276         spin_unlock_irqrestore(&bnad->bna_lock, flags);
3277
3278         bnad->stats.bna_stats = &bna->stats;
3279
3280         bnad_enable_msix(bnad);
3281         err = bnad_mbox_irq_alloc(bnad);
3282         if (err)
3283                 goto res_free;
3284
3285
3286         /* Set up timers */
3287         setup_timer(&bnad->bna.ioceth.ioc.ioc_timer, bnad_ioc_timeout,
3288                                 ((unsigned long)bnad));
3289         setup_timer(&bnad->bna.ioceth.ioc.hb_timer, bnad_ioc_hb_check,
3290                                 ((unsigned long)bnad));
3291         setup_timer(&bnad->bna.ioceth.ioc.iocpf_timer, bnad_iocpf_timeout,
3292                                 ((unsigned long)bnad));
3293         setup_timer(&bnad->bna.ioceth.ioc.sem_timer, bnad_iocpf_sem_timeout,
3294                                 ((unsigned long)bnad));
3295
3296         /* Now start the timer before calling IOC */
3297         mod_timer(&bnad->bna.ioceth.ioc.iocpf_timer,
3298                   jiffies + msecs_to_jiffies(BNA_IOC_TIMER_FREQ));
3299
3300         /*
3301          * Start the chip
3302          * If the enable callback comes back with an error, skip the
3303          * remaining setup and complete the probe (see probe_success).
3304          */
3305         err = bnad_ioceth_enable(bnad);
3306         if (err) {
3307                 pr_err("BNA: Initialization failed err=%d\n",
3308                        err);
3309                 goto probe_success;
3310         }
3311
3312         spin_lock_irqsave(&bnad->bna_lock, flags);
3313         if (bna_num_txq_set(bna, BNAD_NUM_TXQ + 1) ||
3314                 bna_num_rxp_set(bna, BNAD_NUM_RXP + 1)) {
3315                 bnad_q_num_adjust(bnad, bna_attr(bna)->num_txq - 1,
3316                         bna_attr(bna)->num_rxp - 1);
3317                 if (bna_num_txq_set(bna, BNAD_NUM_TXQ + 1) ||
3318                         bna_num_rxp_set(bna, BNAD_NUM_RXP + 1))
3319                         err = -EIO;
3320         }
3321         spin_unlock_irqrestore(&bnad->bna_lock, flags);
3322         if (err)
3323                 goto disable_ioceth;
3324
3325         spin_lock_irqsave(&bnad->bna_lock, flags);
3326         bna_mod_res_req(&bnad->bna, &bnad->mod_res_info[0]);
3327         spin_unlock_irqrestore(&bnad->bna_lock, flags);
3328
3329         err = bnad_res_alloc(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
3330         if (err) {
3331                 err = -EIO;
3332                 goto disable_ioceth;
3333         }
3334
3335         spin_lock_irqsave(&bnad->bna_lock, flags);
3336         bna_mod_init(&bnad->bna, &bnad->mod_res_info[0]);
3337         spin_unlock_irqrestore(&bnad->bna_lock, flags);
3338
3339         /* Get the burned-in MAC address */
3340         spin_lock_irqsave(&bnad->bna_lock, flags);
3341         bna_enet_perm_mac_get(&bna->enet, &bnad->perm_addr);
3342         bnad_set_netdev_perm_addr(bnad);
3343         spin_unlock_irqrestore(&bnad->bna_lock, flags);
3344
3345         mutex_unlock(&bnad->conf_mutex);
3346
3347         /* Finally, register with the net_device layer */
3348         err = register_netdev(netdev);
3349         if (err) {
3350                 pr_err("BNA : Registering with netdev failed\n");
3351                 goto probe_uninit;
3352         }
3353         set_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags);
3354
3355         return 0;
3356
3357 probe_success:
3358         mutex_unlock(&bnad->conf_mutex);
3359         return 0;
3360
3361 probe_uninit:
3362         bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
3363 disable_ioceth:
3364         bnad_ioceth_disable(bnad);
3365         del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer);
3366         del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer);
3367         del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer);
3368         spin_lock_irqsave(&bnad->bna_lock, flags);
3369         bna_uninit(bna);
3370         spin_unlock_irqrestore(&bnad->bna_lock, flags);
3371         bnad_mbox_irq_free(bnad);
3372         bnad_disable_msix(bnad);
3373 res_free:
3374         bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
3375 drv_uninit:
3376         bnad_uninit(bnad);
3377 pci_uninit:
3378         bnad_pci_uninit(pdev);
3379 unlock_mutex:
3380         mutex_unlock(&bnad->conf_mutex);
3381         bnad_lock_uninit(bnad);
3382         free_netdev(netdev);
3383         return err;
3384 }
3385
3386 static void __devexit
3387 bnad_pci_remove(struct pci_dev *pdev)
3388 {
3389         struct net_device *netdev = pci_get_drvdata(pdev);
3390         struct bnad *bnad;
3391         struct bna *bna;
3392         unsigned long flags;
3393
3394         if (!netdev)
3395                 return;
3396
3397         pr_info("%s bnad_pci_remove\n", netdev->name);
3398         bnad = netdev_priv(netdev);
3399         bna = &bnad->bna;
3400
3401         if (test_and_clear_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags))
3402                 unregister_netdev(netdev);
3403
3404         mutex_lock(&bnad->conf_mutex);
3405         bnad_ioceth_disable(bnad);
3406         del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer);
3407         del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer);
3408         del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer);
3409         spin_lock_irqsave(&bnad->bna_lock, flags);
3410         bna_uninit(bna);
3411         spin_unlock_irqrestore(&bnad->bna_lock, flags);
3412
3413         bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
3414         bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
3415         bnad_mbox_irq_free(bnad);
3416         bnad_disable_msix(bnad);
3417         bnad_pci_uninit(pdev);
3418         mutex_unlock(&bnad->conf_mutex);
3419         bnad_lock_uninit(bnad);
3420         bnad_uninit(bnad);
3421         free_netdev(netdev);
3422 }
3423
3424 static DEFINE_PCI_DEVICE_TABLE(bnad_pci_id_table) = {
3425         {
3426                 PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
3427                         PCI_DEVICE_ID_BROCADE_CT),
3428                 .class = PCI_CLASS_NETWORK_ETHERNET << 8,
3429                 .class_mask =  0xffff00
3430         }, {0,  }
3431 };
3432
3433 MODULE_DEVICE_TABLE(pci, bnad_pci_id_table);
3434
3435 static struct pci_driver bnad_pci_driver = {
3436         .name = BNAD_NAME,
3437         .id_table = bnad_pci_id_table,
3438         .probe = bnad_pci_probe,
3439         .remove = __devexit_p(bnad_pci_remove),
3440 };
3441
3442 static int __init
3443 bnad_module_init(void)
3444 {
3445         int err;
3446
3447         pr_info("Brocade 10G Ethernet driver - version: %s\n",
3448                         BNAD_VERSION);
3449
3450         bfa_nw_ioc_auto_recover(bnad_ioc_auto_recover);
3451
3452         err = pci_register_driver(&bnad_pci_driver);
3453         if (err < 0) {
3454                 pr_err("bna : PCI registration failed in module init "
3455                        "(%d)\n", err);
3456                 return err;
3457         }
3458
3459         return 0;
3460 }
3461
3462 static void __exit
3463 bnad_module_exit(void)
3464 {
3465         pci_unregister_driver(&bnad_pci_driver);
3466
3467         if (bfi_fw)
3468                 release_firmware(bfi_fw);
3469 }
3470
3471 module_init(bnad_module_init);
3472 module_exit(bnad_module_exit);
3473
3474 MODULE_AUTHOR("Brocade");
3475 MODULE_LICENSE("GPL");
3476 MODULE_DESCRIPTION("Brocade 10G PCIe Ethernet driver");
3477 MODULE_VERSION(BNAD_VERSION);
3478 MODULE_FIRMWARE(CNA_FW_FILE_CT);