bna: ENET and Tx Rx Redesign Enablement
drivers/net/ethernet/brocade/bna/bnad.c (pandora-kernel.git)
1 /*
2  * Linux network driver for Brocade Converged Network Adapter.
3  *
4  * This program is free software; you can redistribute it and/or modify it
5  * under the terms of the GNU General Public License (GPL) Version 2 as
6  * published by the Free Software Foundation
7  *
8  * This program is distributed in the hope that it will be useful, but
9  * WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  */
13 /*
14  * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
15  * All rights reserved
16  * www.brocade.com
17  */
18 #include <linux/bitops.h>
19 #include <linux/netdevice.h>
20 #include <linux/skbuff.h>
21 #include <linux/etherdevice.h>
22 #include <linux/in.h>
23 #include <linux/ethtool.h>
24 #include <linux/if_vlan.h>
25 #include <linux/if_ether.h>
26 #include <linux/ip.h>
27 #include <linux/prefetch.h>
28
29 #include "bnad.h"
30 #include "bna.h"
31 #include "cna.h"
32
33 static DEFINE_MUTEX(bnad_fwimg_mutex);
34
35 /*
36  * Module params
37  */
38 static uint bnad_msix_disable;
39 module_param(bnad_msix_disable, uint, 0444);
40 MODULE_PARM_DESC(bnad_msix_disable, "Disable MSIX mode");
41
42 static uint bnad_ioc_auto_recover = 1;
43 module_param(bnad_ioc_auto_recover, uint, 0444);
44 MODULE_PARM_DESC(bnad_ioc_auto_recover, "Enable / Disable auto recovery");
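/*
 * Usage sketch (assuming this file builds into the "bna" module):
 *   modprobe bna bnad_msix_disable=1 bnad_ioc_auto_recover=0
 * would fall back to INTx interrupts and turn off IOC auto recovery.
 */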
45
46 /*
47  * Global variables
48  */
49 u32 bnad_rxqs_per_cq = 2;
50
51 static const u8 bnad_bcast_addr[] =  {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
52
53 /*
54  * Local MACROS
55  */
56 #define BNAD_TX_UNMAPQ_DEPTH (bnad->txq_depth * 2)
57
58 #define BNAD_RX_UNMAPQ_DEPTH (bnad->rxq_depth)
59
60 #define BNAD_GET_MBOX_IRQ(_bnad)                                \
61         (((_bnad)->cfg_flags & BNAD_CF_MSIX) ?                  \
62          ((_bnad)->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector) : \
63          ((_bnad)->pcidev->irq))
64
65 #define BNAD_FILL_UNMAPQ_MEM_REQ(_res_info, _num, _depth)       \
66 do {                                                            \
67         (_res_info)->res_type = BNA_RES_T_MEM;                  \
68         (_res_info)->res_u.mem_info.mem_type = BNA_MEM_T_KVA;   \
69         (_res_info)->res_u.mem_info.num = (_num);               \
70         (_res_info)->res_u.mem_info.len =                       \
71         sizeof(struct bnad_unmap_q) +                           \
72         (sizeof(struct bnad_skb_unmap) * ((_depth) - 1));       \
73 } while (0)
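/*
 * The unmap queue is a single KVA block: struct bnad_unmap_q already
 * embeds one bnad_skb_unmap element, so only (_depth - 1) additional
 * elements are accounted for in the length above.
 */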
74
75 #define BNAD_TXRX_SYNC_MDELAY   250     /* 250 msecs */
76
77 /*
78  * Reinitialize completions in CQ, once Rx is taken down
79  */
80 static void
81 bnad_cq_cmpl_init(struct bnad *bnad, struct bna_ccb *ccb)
82 {
83         struct bna_cq_entry *cmpl, *next_cmpl;
84         unsigned int wi_range, wis = 0, ccb_prod = 0;
85         int i;
86
87         BNA_CQ_QPGE_PTR_GET(ccb_prod, ccb->sw_qpt, cmpl,
88                             wi_range);
89
90         for (i = 0; i < ccb->q_depth; i++) {
91                 wis++;
92                 if (likely(--wi_range))
93                         next_cmpl = cmpl + 1;
94                 else {
95                         BNA_QE_INDX_ADD(ccb_prod, wis, ccb->q_depth);
96                         wis = 0;
97                         BNA_CQ_QPGE_PTR_GET(ccb_prod, ccb->sw_qpt,
98                                                 next_cmpl, wi_range);
99                 }
100                 cmpl->valid = 0;
101                 cmpl = next_cmpl;
102         }
103 }
104
105 /*
106  * Frees all pending Tx Bufs
107  * At this point no activity is expected on the Q,
108  * so DMA unmap & freeing is fine.
109  */
110 static void
111 bnad_free_all_txbufs(struct bnad *bnad,
112                  struct bna_tcb *tcb)
113 {
114         u32             unmap_cons;
115         struct bnad_unmap_q *unmap_q = tcb->unmap_q;
116         struct bnad_skb_unmap *unmap_array;
117         struct sk_buff          *skb = NULL;
118         int                     i;
119
120         unmap_array = unmap_q->unmap_array;
121
122         unmap_cons = 0;
123         while (unmap_cons < unmap_q->q_depth) {
124                 skb = unmap_array[unmap_cons].skb;
125                 if (!skb) {
126                         unmap_cons++;
127                         continue;
128                 }
129                 unmap_array[unmap_cons].skb = NULL;
130
131                 dma_unmap_single(&bnad->pcidev->dev,
132                                  dma_unmap_addr(&unmap_array[unmap_cons],
133                                                 dma_addr), skb_headlen(skb),
134                                                 DMA_TO_DEVICE);
135
136                 dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
137                 if (++unmap_cons >= unmap_q->q_depth)
138                         break;
139
140                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
141                         dma_unmap_page(&bnad->pcidev->dev,
142                                        dma_unmap_addr(&unmap_array[unmap_cons],
143                                                       dma_addr),
144                                        skb_shinfo(skb)->frags[i].size,
145                                        DMA_TO_DEVICE);
146                         dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
147                                            0);
148                         if (++unmap_cons >= unmap_q->q_depth)
149                                 break;
150                 }
151                 dev_kfree_skb_any(skb);
152         }
153 }
154
155 /* Data Path Handlers */
156
157 /*
158  * bnad_free_txbufs: Frees the Tx bufs on Tx completion
159  * Can be called in a) Interrupt context
160  *                  b) Sending context
161  *                  c) Tasklet context
162  */
163 static u32
164 bnad_free_txbufs(struct bnad *bnad,
165                  struct bna_tcb *tcb)
166 {
167         u32             sent_packets = 0, sent_bytes = 0;
168         u16             wis, unmap_cons, updated_hw_cons;
169         struct bnad_unmap_q *unmap_q = tcb->unmap_q;
170         struct bnad_skb_unmap *unmap_array;
171         struct sk_buff          *skb;
172         int i;
173
174         /*
175          * Just return if TX is stopped. This check is useful
176          * when bnad_free_txbufs() runs from a tasklet that was
177          * scheduled before bnad_cb_tx_cleanup() cleared the
178          * BNAD_TXQ_TX_STARTED bit, but actually executes after
179          * the cleanup has completed.
180          */
181         if (!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
182                 return 0;
183
184         updated_hw_cons = *(tcb->hw_consumer_index);
185
186         wis = BNA_Q_INDEX_CHANGE(tcb->consumer_index,
187                                   updated_hw_cons, tcb->q_depth);
188
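        /* The hardware consumer index must not move past the entries
         * actually in use on this TxQ.
         */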
189         BUG_ON(!(wis <= BNA_QE_IN_USE_CNT(tcb, tcb->q_depth)));
190
191         unmap_array = unmap_q->unmap_array;
192         unmap_cons = unmap_q->consumer_index;
193
194         prefetch(&unmap_array[unmap_cons + 1]);
195         while (wis) {
196                 skb = unmap_array[unmap_cons].skb;
197
198                 unmap_array[unmap_cons].skb = NULL;
199
200                 sent_packets++;
201                 sent_bytes += skb->len;
202                 wis -= BNA_TXQ_WI_NEEDED(1 + skb_shinfo(skb)->nr_frags);
203
204                 dma_unmap_single(&bnad->pcidev->dev,
205                                  dma_unmap_addr(&unmap_array[unmap_cons],
206                                                 dma_addr), skb_headlen(skb),
207                                  DMA_TO_DEVICE);
208                 dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
209                 BNA_QE_INDX_ADD(unmap_cons, 1, unmap_q->q_depth);
210
211                 prefetch(&unmap_array[unmap_cons + 1]);
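                /* The head fragment was unmapped above; now unmap each page
                 * fragment, consuming one unmap_array entry per fragment.
                 */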
212                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
213                         prefetch(&unmap_array[unmap_cons + 1]);
214
215                         dma_unmap_page(&bnad->pcidev->dev,
216                                        dma_unmap_addr(&unmap_array[unmap_cons],
217                                                       dma_addr),
218                                        skb_shinfo(skb)->frags[i].size,
219                                        DMA_TO_DEVICE);
220                         dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
221                                            0);
222                         BNA_QE_INDX_ADD(unmap_cons, 1, unmap_q->q_depth);
223                 }
224                 dev_kfree_skb_any(skb);
225         }
226
227         /* Update consumer pointers. */
228         tcb->consumer_index = updated_hw_cons;
229         unmap_q->consumer_index = unmap_cons;
230
231         tcb->txq->tx_packets += sent_packets;
232         tcb->txq->tx_bytes += sent_bytes;
233
234         return sent_packets;
235 }
236
237 /*
238  * Tx Free Tasklet function.
239  * Frees completed Tx buffers for all the tcb's in all the Tx's.
240  * Scheduled from the sending context, so that
241  * the fat Tx lock is not held for too long
242  * in the sending context.
243  */
244 static void
245 bnad_tx_free_tasklet(unsigned long bnad_ptr)
246 {
247         struct bnad *bnad = (struct bnad *)bnad_ptr;
248         struct bna_tcb *tcb;
249         u32             acked = 0;
250         int                     i, j;
251
252         for (i = 0; i < bnad->num_tx; i++) {
253                 for (j = 0; j < bnad->num_txq_per_tx; j++) {
254                         tcb = bnad->tx_info[i].tcb[j];
255                         if (!tcb)
256                                 continue;
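                        /*
                         * BNAD_TXQ_FREE_SENT acts as a lock: only one of the
                         * tasklet, bnad_tx() or the Tx cleanup path reclaims
                         * buffers for a given tcb at a time.
                         */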
257                         if (((u16) (*tcb->hw_consumer_index) !=
258                                 tcb->consumer_index) &&
259                                 (!test_and_set_bit(BNAD_TXQ_FREE_SENT,
260                                                   &tcb->flags))) {
261                                 acked = bnad_free_txbufs(bnad, tcb);
262                                 if (likely(test_bit(BNAD_TXQ_TX_STARTED,
263                                         &tcb->flags)))
264                                         bna_ib_ack(tcb->i_dbell, acked);
265                                 smp_mb__before_clear_bit();
266                                 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
267                         }
268                         if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED,
269                                                 &tcb->flags)))
270                                 continue;
271                         if (netif_queue_stopped(bnad->netdev)) {
272                                 if (acked && netif_carrier_ok(bnad->netdev) &&
273                                         BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
274                                                 BNAD_NETIF_WAKE_THRESHOLD) {
275                                         netif_wake_queue(bnad->netdev);
276                                         /* TODO */
277                                         /* Counters for individual TxQs? */
278                                         BNAD_UPDATE_CTR(bnad,
279                                                 netif_queue_wakeup);
280                                 }
281                         }
282                 }
283         }
284 }
285
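/*
 * Reclaims completed Tx buffers for one TxQ, wakes the netdev queue when
 * enough entries are free, and acks the interrupt block. Returns the
 * number of packets freed. Called from the MSIX Tx handler and the ISR.
 */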
286 static u32
287 bnad_tx(struct bnad *bnad, struct bna_tcb *tcb)
288 {
289         struct net_device *netdev = bnad->netdev;
290         u32 sent = 0;
291
292         if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
293                 return 0;
294
295         sent = bnad_free_txbufs(bnad, tcb);
296         if (sent) {
297                 if (netif_queue_stopped(netdev) &&
298                     netif_carrier_ok(netdev) &&
299                     BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
300                                     BNAD_NETIF_WAKE_THRESHOLD) {
301                         if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) {
302                                 netif_wake_queue(netdev);
303                                 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
304                         }
305                 }
306         }
307
308         if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
309                 bna_ib_ack(tcb->i_dbell, sent);
310
311         smp_mb__before_clear_bit();
312         clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
313
314         return sent;
315 }
316
317 /* MSIX Tx Completion Handler */
318 static irqreturn_t
319 bnad_msix_tx(int irq, void *data)
320 {
321         struct bna_tcb *tcb = (struct bna_tcb *)data;
322         struct bnad *bnad = tcb->bnad;
323
324         bnad_tx(bnad, tcb);
325
326         return IRQ_HANDLED;
327 }
328
329 static void
330 bnad_reset_rcb(struct bnad *bnad, struct bna_rcb *rcb)
331 {
332         struct bnad_unmap_q *unmap_q = rcb->unmap_q;
333
334         rcb->producer_index = 0;
335         rcb->consumer_index = 0;
336
337         unmap_q->producer_index = 0;
338         unmap_q->consumer_index = 0;
339 }
340
341 static void
342 bnad_free_all_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
343 {
344         struct bnad_unmap_q *unmap_q;
345         struct bnad_skb_unmap *unmap_array;
346         struct sk_buff *skb;
347         int unmap_cons;
348
349         unmap_q = rcb->unmap_q;
350         unmap_array = unmap_q->unmap_array;
351         for (unmap_cons = 0; unmap_cons < unmap_q->q_depth; unmap_cons++) {
352                 skb = unmap_array[unmap_cons].skb;
353                 if (!skb)
354                         continue;
355                 unmap_array[unmap_cons].skb = NULL;
356                 dma_unmap_single(&bnad->pcidev->dev,
357                                  dma_unmap_addr(&unmap_array[unmap_cons],
358                                                 dma_addr),
359                                  rcb->rxq->buffer_size,
360                                  DMA_FROM_DEVICE);
361                 dev_kfree_skb(skb);
362         }
363         bnad_reset_rcb(bnad, rcb);
364 }
365
366 static void
367 bnad_alloc_n_post_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
368 {
369         u16 to_alloc, alloced, unmap_prod, wi_range;
370         struct bnad_unmap_q *unmap_q = rcb->unmap_q;
371         struct bnad_skb_unmap *unmap_array;
372         struct bna_rxq_entry *rxent;
373         struct sk_buff *skb;
374         dma_addr_t dma_addr;
375
376         alloced = 0;
377         to_alloc =
378                 BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth);
379
380         unmap_array = unmap_q->unmap_array;
381         unmap_prod = unmap_q->producer_index;
382
383         BNA_RXQ_QPGE_PTR_GET(unmap_prod, rcb->sw_qpt, rxent, wi_range);
384
385         while (to_alloc--) {
386                 if (!wi_range) {
387                         BNA_RXQ_QPGE_PTR_GET(unmap_prod, rcb->sw_qpt, rxent,
388                                              wi_range);
389                 }
390                 skb = netdev_alloc_skb_ip_align(bnad->netdev,
391                                                 rcb->rxq->buffer_size);
392                 if (unlikely(!skb)) {
393                         BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed);
394                         goto finishing;
395                 }
396                 unmap_array[unmap_prod].skb = skb;
397                 dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
398                                           rcb->rxq->buffer_size,
399                                           DMA_FROM_DEVICE);
400                 dma_unmap_addr_set(&unmap_array[unmap_prod], dma_addr,
401                                    dma_addr);
402                 BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
403                 BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
404
405                 rxent++;
406                 wi_range--;
407                 alloced++;
408         }
409
410 finishing:
411         if (likely(alloced)) {
412                 unmap_q->producer_index = unmap_prod;
413                 rcb->producer_index = unmap_prod;
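                /* Publish the new producer index before ringing the doorbell. */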
414                 smp_mb();
415                 if (likely(test_bit(BNAD_RXQ_STARTED, &rcb->flags)))
416                         bna_rxq_prod_indx_doorbell(rcb);
417         }
418 }
419
420 static inline void
421 bnad_refill_rxq(struct bnad *bnad, struct bna_rcb *rcb)
422 {
423         struct bnad_unmap_q *unmap_q = rcb->unmap_q;
424
425         if (!test_and_set_bit(BNAD_RXQ_REFILL, &rcb->flags)) {
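                /* Refill only when at least 2^BNAD_RXQ_REFILL_THRESHOLD_SHIFT
                 * entries of the unmap queue are free.
                 */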
426                 if (BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth)
427                          >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT)
428                         bnad_alloc_n_post_rxbufs(bnad, rcb);
429                 smp_mb__before_clear_bit();
430                 clear_bit(BNAD_RXQ_REFILL, &rcb->flags);
431         }
432 }
433
434 static u32
435 bnad_poll_cq(struct bnad *bnad, struct bna_ccb *ccb, int budget)
436 {
437         struct bna_cq_entry *cmpl, *next_cmpl;
438         struct bna_rcb *rcb = NULL;
439         unsigned int wi_range, packets = 0, wis = 0;
440         struct bnad_unmap_q *unmap_q;
441         struct bnad_skb_unmap *unmap_array;
442         struct sk_buff *skb;
443         u32 flags, unmap_cons;
444         struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate;
445         struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl);
446
447         set_bit(BNAD_FP_IN_RX_PATH, &rx_ctrl->flags);
448
449         if (!test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)) {
450                 clear_bit(BNAD_FP_IN_RX_PATH, &rx_ctrl->flags);
451                 return 0;
452         }
453
454         prefetch(bnad->netdev);
455         BNA_CQ_QPGE_PTR_GET(ccb->producer_index, ccb->sw_qpt, cmpl,
456                             wi_range);
457         BUG_ON(!(wi_range <= ccb->q_depth));
458         while (cmpl->valid && packets < budget) {
459                 packets++;
460                 BNA_UPDATE_PKT_CNT(pkt_rt, ntohs(cmpl->length));
461
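                /* With two RxQs per CQ, the completion's rxq_id selects the
                 * small-buffer RxQ (rcb[1]) or the default RxQ (rcb[0]).
                 */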
462                 if (bna_is_small_rxq(cmpl->rxq_id))
463                         rcb = ccb->rcb[1];
464                 else
465                         rcb = ccb->rcb[0];
466
467                 unmap_q = rcb->unmap_q;
468                 unmap_array = unmap_q->unmap_array;
469                 unmap_cons = unmap_q->consumer_index;
470
471                 skb = unmap_array[unmap_cons].skb;
472                 BUG_ON(!(skb));
473                 unmap_array[unmap_cons].skb = NULL;
474                 dma_unmap_single(&bnad->pcidev->dev,
475                                  dma_unmap_addr(&unmap_array[unmap_cons],
476                                                 dma_addr),
477                                  rcb->rxq->buffer_size,
478                                  DMA_FROM_DEVICE);
479                 BNA_QE_INDX_ADD(unmap_q->consumer_index, 1, unmap_q->q_depth);
480
481                 /* Should be more efficient ? Performance ? */
482                 BNA_QE_INDX_ADD(rcb->consumer_index, 1, rcb->q_depth);
483
484                 wis++;
485                 if (likely(--wi_range))
486                         next_cmpl = cmpl + 1;
487                 else {
488                         BNA_QE_INDX_ADD(ccb->producer_index, wis, ccb->q_depth);
489                         wis = 0;
490                         BNA_CQ_QPGE_PTR_GET(ccb->producer_index, ccb->sw_qpt,
491                                                 next_cmpl, wi_range);
492                         BUG_ON(!(wi_range <= ccb->q_depth));
493                 }
494                 prefetch(next_cmpl);
495
496                 flags = ntohl(cmpl->flags);
497                 if (unlikely
498                     (flags &
499                      (BNA_CQ_EF_MAC_ERROR | BNA_CQ_EF_FCS_ERROR |
500                       BNA_CQ_EF_TOO_LONG))) {
501                         dev_kfree_skb_any(skb);
502                         rcb->rxq->rx_packets_with_error++;
503                         goto next;
504                 }
505
506                 skb_put(skb, ntohs(cmpl->length));
507                 if (likely
508                     ((bnad->netdev->features & NETIF_F_RXCSUM) &&
509                      (((flags & BNA_CQ_EF_IPV4) &&
510                       (flags & BNA_CQ_EF_L3_CKSUM_OK)) ||
511                       (flags & BNA_CQ_EF_IPV6)) &&
512                       (flags & (BNA_CQ_EF_TCP | BNA_CQ_EF_UDP)) &&
513                       (flags & BNA_CQ_EF_L4_CKSUM_OK)))
514                         skb->ip_summed = CHECKSUM_UNNECESSARY;
515                 else
516                         skb_checksum_none_assert(skb);
517
518                 rcb->rxq->rx_packets++;
519                 rcb->rxq->rx_bytes += skb->len;
520                 skb->protocol = eth_type_trans(skb, bnad->netdev);
521
522                 if (flags & BNA_CQ_EF_VLAN)
523                         __vlan_hwaccel_put_tag(skb, ntohs(cmpl->vlan_tag));
524
525                 if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
526                         napi_gro_receive(&rx_ctrl->napi, skb);
527                 } else {
528                         netif_receive_skb(skb);
529                 }
530
531 next:
532                 cmpl->valid = 0;
533                 cmpl = next_cmpl;
534         }
535
536         BNA_QE_INDX_ADD(ccb->producer_index, wis, ccb->q_depth);
537
538         if (likely(ccb)) {
539                 if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
540                         bna_ib_ack(ccb->i_dbell, packets);
541                 bnad_refill_rxq(bnad, ccb->rcb[0]);
542                 if (ccb->rcb[1])
543                         bnad_refill_rxq(bnad, ccb->rcb[1]);
544         } else {
545                 if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
546                         bna_ib_ack(ccb->i_dbell, 0);
547         }
548
549         clear_bit(BNAD_FP_IN_RX_PATH, &rx_ctrl->flags);
550
551         return packets;
552 }
553
554 static void
555 bnad_disable_rx_irq(struct bnad *bnad, struct bna_ccb *ccb)
556 {
557         if (unlikely(!test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
558                 return;
559
560         bna_ib_coalescing_timer_set(ccb->i_dbell, 0);
561         bna_ib_ack(ccb->i_dbell, 0);
562 }
563
564 static void
565 bnad_enable_rx_irq(struct bnad *bnad, struct bna_ccb *ccb)
566 {
567         unsigned long flags;
568
569         /* Because of polling context */
570         spin_lock_irqsave(&bnad->bna_lock, flags);
571         bnad_enable_rx_irq_unsafe(ccb);
572         spin_unlock_irqrestore(&bnad->bna_lock, flags);
573 }
574
575 static void
576 bnad_netif_rx_schedule_poll(struct bnad *bnad, struct bna_ccb *ccb)
577 {
578         struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl);
579         struct napi_struct *napi = &rx_ctrl->napi;
580
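        /*
         * Switch to polling: zero the coalescing timer and ack the IB so no
         * further completion interrupts fire, then hand the CQ to NAPI.
         */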
581         if (likely(napi_schedule_prep(napi))) {
582                 bnad_disable_rx_irq(bnad, ccb);
583                 __napi_schedule(napi);
584         }
585         BNAD_UPDATE_CTR(bnad, netif_rx_schedule);
586 }
587
588 /* MSIX Rx Path Handler */
589 static irqreturn_t
590 bnad_msix_rx(int irq, void *data)
591 {
592         struct bna_ccb *ccb = (struct bna_ccb *)data;
593         struct bnad *bnad = ccb->bnad;
594
595         bnad_netif_rx_schedule_poll(bnad, ccb);
596
597         return IRQ_HANDLED;
598 }
599
600 /* Interrupt handlers */
601
602 /* Mbox Interrupt Handlers */
603 static irqreturn_t
604 bnad_msix_mbox_handler(int irq, void *data)
605 {
606         u32 intr_status;
607         unsigned long flags;
608         struct bnad *bnad = (struct bnad *)data;
609
610         if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags)))
611                 return IRQ_HANDLED;
612
613         spin_lock_irqsave(&bnad->bna_lock, flags);
614
615         bna_intr_status_get(&bnad->bna, intr_status);
616
617         if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status))
618                 bna_mbox_handler(&bnad->bna, intr_status);
619
620         spin_unlock_irqrestore(&bnad->bna_lock, flags);
621
622         return IRQ_HANDLED;
623 }
624
625 static irqreturn_t
626 bnad_isr(int irq, void *data)
627 {
628         int i, j;
629         u32 intr_status;
630         unsigned long flags;
631         struct bnad *bnad = (struct bnad *)data;
632         struct bnad_rx_info *rx_info;
633         struct bnad_rx_ctrl *rx_ctrl;
634         struct bna_tcb *tcb = NULL;
635
636         if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags)))
637                 return IRQ_NONE;
638
639         bna_intr_status_get(&bnad->bna, intr_status);
640
641         if (unlikely(!intr_status))
642                 return IRQ_NONE;
643
644         spin_lock_irqsave(&bnad->bna_lock, flags);
645
646         if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status))
647                 bna_mbox_handler(&bnad->bna, intr_status);
648
649         spin_unlock_irqrestore(&bnad->bna_lock, flags);
650
651         if (!BNA_IS_INTX_DATA_INTR(intr_status))
652                 return IRQ_HANDLED;
653
654         /* Process data interrupts */
655         /* Tx processing */
656         for (i = 0; i < bnad->num_tx; i++) {
657                 for (j = 0; j < bnad->num_txq_per_tx; j++) {
658                         tcb = bnad->tx_info[i].tcb[j];
659                         if (tcb && test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
660                                 bnad_tx(bnad, bnad->tx_info[i].tcb[j]);
661                 }
662         }
663         /* Rx processing */
664         for (i = 0; i < bnad->num_rx; i++) {
665                 rx_info = &bnad->rx_info[i];
666                 if (!rx_info->rx)
667                         continue;
668                 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
669                         rx_ctrl = &rx_info->rx_ctrl[j];
670                         if (rx_ctrl->ccb)
671                                 bnad_netif_rx_schedule_poll(bnad,
672                                                             rx_ctrl->ccb);
673                 }
674         }
675         return IRQ_HANDLED;
676 }
677
678 /*
679  * Called in interrupt / callback context
680  * with bna_lock held, so cfg_flags access is OK
681  */
682 static void
683 bnad_enable_mbox_irq(struct bnad *bnad)
684 {
685         clear_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
686
687         BNAD_UPDATE_CTR(bnad, mbox_intr_enabled);
688 }
689
690 /*
691  * Called with bnad->bna_lock held because of
692  * bnad->cfg_flags access.
693  */
694 static void
695 bnad_disable_mbox_irq(struct bnad *bnad)
696 {
697         set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
698
699         BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
700 }
701
702 static void
703 bnad_set_netdev_perm_addr(struct bnad *bnad)
704 {
705         struct net_device *netdev = bnad->netdev;
706
707         memcpy(netdev->perm_addr, &bnad->perm_addr, netdev->addr_len);
708         if (is_zero_ether_addr(netdev->dev_addr))
709                 memcpy(netdev->dev_addr, &bnad->perm_addr, netdev->addr_len);
710 }
711
712 /* Control Path Handlers */
713
714 /* Callbacks */
715 void
716 bnad_cb_mbox_intr_enable(struct bnad *bnad)
717 {
718         bnad_enable_mbox_irq(bnad);
719 }
720
721 void
722 bnad_cb_mbox_intr_disable(struct bnad *bnad)
723 {
724         bnad_disable_mbox_irq(bnad);
725 }
726
727 void
728 bnad_cb_ioceth_ready(struct bnad *bnad)
729 {
730         bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS;
731         complete(&bnad->bnad_completions.ioc_comp);
732 }
733
734 void
735 bnad_cb_ioceth_failed(struct bnad *bnad)
736 {
737         bnad->bnad_completions.ioc_comp_status = BNA_CB_FAIL;
738         complete(&bnad->bnad_completions.ioc_comp);
739 }
740
741 void
742 bnad_cb_ioceth_disabled(struct bnad *bnad)
743 {
744         bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS;
745         complete(&bnad->bnad_completions.ioc_comp);
746 }
747
748 static void
749 bnad_cb_enet_disabled(void *arg)
750 {
751         struct bnad *bnad = (struct bnad *)arg;
752
753         netif_carrier_off(bnad->netdev);
754         complete(&bnad->bnad_completions.enet_comp);
755 }
756
757 void
758 bnad_cb_ethport_link_status(struct bnad *bnad,
759                         enum bna_link_status link_status)
760 {
761         bool link_up = false;
762
763         link_up = (link_status == BNA_LINK_UP) || (link_status == BNA_CEE_UP);
764
765         if (link_status == BNA_CEE_UP) {
766                 if (!test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags))
767                         BNAD_UPDATE_CTR(bnad, cee_toggle);
768                 set_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
769         } else {
770                 if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags))
771                         BNAD_UPDATE_CTR(bnad, cee_toggle);
772                 clear_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
773         }
774
775         if (link_up) {
776                 if (!netif_carrier_ok(bnad->netdev)) {
777                         uint tx_id, tcb_id;
778                         printk(KERN_WARNING "bna: %s link up\n",
779                                 bnad->netdev->name);
780                         netif_carrier_on(bnad->netdev);
781                         BNAD_UPDATE_CTR(bnad, link_toggle);
782                         for (tx_id = 0; tx_id < bnad->num_tx; tx_id++) {
783                                 for (tcb_id = 0; tcb_id < bnad->num_txq_per_tx;
784                                       tcb_id++) {
785                                         struct bna_tcb *tcb =
786                                         bnad->tx_info[tx_id].tcb[tcb_id];
787                                         u32 txq_id;
788                                         if (!tcb)
789                                                 continue;
790
791                                         txq_id = tcb->id;
792
793                                         if (test_bit(BNAD_TXQ_TX_STARTED,
794                                                      &tcb->flags)) {
795                                                 /*
796                                                  * Force an immediate
797                                                  * Transmit Schedule */
798                                                 printk(KERN_INFO "bna: %s %d "
799                                                       "TXQ_STARTED\n",
800                                                        bnad->netdev->name,
801                                                        txq_id);
802                                                 netif_wake_subqueue(
803                                                                 bnad->netdev,
804                                                                 txq_id);
805                                                 BNAD_UPDATE_CTR(bnad,
806                                                         netif_queue_wakeup);
807                                         } else {
808                                                 netif_stop_subqueue(
809                                                                 bnad->netdev,
810                                                                 txq_id);
811                                                 BNAD_UPDATE_CTR(bnad,
812                                                         netif_queue_stop);
813                                         }
814                                 }
815                         }
816                 }
817         } else {
818                 if (netif_carrier_ok(bnad->netdev)) {
819                         printk(KERN_WARNING "bna: %s link down\n",
820                                 bnad->netdev->name);
821                         netif_carrier_off(bnad->netdev);
822                         BNAD_UPDATE_CTR(bnad, link_toggle);
823                 }
824         }
825 }
826
827 static void
828 bnad_cb_tx_disabled(void *arg, struct bna_tx *tx)
829 {
830         struct bnad *bnad = (struct bnad *)arg;
831
832         complete(&bnad->bnad_completions.tx_comp);
833 }
834
835 static void
836 bnad_cb_tcb_setup(struct bnad *bnad, struct bna_tcb *tcb)
837 {
838         struct bnad_tx_info *tx_info =
839                         (struct bnad_tx_info *)tcb->txq->tx->priv;
840         struct bnad_unmap_q *unmap_q = tcb->unmap_q;
841
842         tx_info->tcb[tcb->id] = tcb;
843         unmap_q->producer_index = 0;
844         unmap_q->consumer_index = 0;
845         unmap_q->q_depth = BNAD_TX_UNMAPQ_DEPTH;
846 }
847
848 static void
849 bnad_cb_tcb_destroy(struct bnad *bnad, struct bna_tcb *tcb)
850 {
851         struct bnad_tx_info *tx_info =
852                         (struct bnad_tx_info *)tcb->txq->tx->priv;
853         struct bnad_unmap_q *unmap_q = tcb->unmap_q;
854
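        /* Wait for any in-flight buffer reclaim (tasklet or ISR) to finish. */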
855         while (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
856                 cpu_relax();
857
858         bnad_free_all_txbufs(bnad, tcb);
859
860         unmap_q->producer_index = 0;
861         unmap_q->consumer_index = 0;
862
863         smp_mb__before_clear_bit();
864         clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
865
866         tx_info->tcb[tcb->id] = NULL;
867 }
868
869 static void
870 bnad_cb_rcb_setup(struct bnad *bnad, struct bna_rcb *rcb)
871 {
872         struct bnad_unmap_q *unmap_q = rcb->unmap_q;
873
874         unmap_q->producer_index = 0;
875         unmap_q->consumer_index = 0;
876         unmap_q->q_depth = BNAD_RX_UNMAPQ_DEPTH;
877 }
878
879 static void
880 bnad_cb_rcb_destroy(struct bnad *bnad, struct bna_rcb *rcb)
881 {
882         bnad_free_all_rxbufs(bnad, rcb);
883 }
884
885 static void
886 bnad_cb_ccb_setup(struct bnad *bnad, struct bna_ccb *ccb)
887 {
888         struct bnad_rx_info *rx_info =
889                         (struct bnad_rx_info *)ccb->cq->rx->priv;
890
891         rx_info->rx_ctrl[ccb->id].ccb = ccb;
892         ccb->ctrl = &rx_info->rx_ctrl[ccb->id];
893 }
894
895 static void
896 bnad_cb_ccb_destroy(struct bnad *bnad, struct bna_ccb *ccb)
897 {
898         struct bnad_rx_info *rx_info =
899                         (struct bnad_rx_info *)ccb->cq->rx->priv;
900
901         rx_info->rx_ctrl[ccb->id].ccb = NULL;
902 }
903
904 static void
905 bnad_cb_tx_stall(struct bnad *bnad, struct bna_tx *tx)
906 {
907         struct bnad_tx_info *tx_info =
908                         (struct bnad_tx_info *)tx->priv;
909         struct bna_tcb *tcb;
910         u32 txq_id;
911         int i;
912
913         for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
914                 tcb = tx_info->tcb[i];
915                 if (!tcb)
916                         continue;
917                 txq_id = tcb->id;
918                 clear_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
919                 netif_stop_subqueue(bnad->netdev, txq_id);
920                 printk(KERN_INFO "bna: %s %d TXQ_STOPPED\n",
921                         bnad->netdev->name, txq_id);
922         }
923 }
924
925 static void
926 bnad_cb_tx_resume(struct bnad *bnad, struct bna_tx *tx)
927 {
928         struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv;
929         struct bna_tcb *tcb;
930         struct bnad_unmap_q *unmap_q;
931         u32 txq_id;
932         int i;
933
934         for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
935                 tcb = tx_info->tcb[i];
936                 if (!tcb)
937                         continue;
938                 txq_id = tcb->id;
939
940                 unmap_q = tcb->unmap_q;
941
942                 if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
943                         continue;
944
945                 while (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
946                         cpu_relax();
947
948                 bnad_free_all_txbufs(bnad, tcb);
949
950                 unmap_q->producer_index = 0;
951                 unmap_q->consumer_index = 0;
952
953                 smp_mb__before_clear_bit();
954                 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
955
956                 set_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
957
958                 if (netif_carrier_ok(bnad->netdev)) {
959                         printk(KERN_INFO "bna: %s %d TXQ_STARTED\n",
960                                 bnad->netdev->name, txq_id);
961                         netif_wake_subqueue(bnad->netdev, txq_id);
962                         BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
963                 }
964         }
965
966         /*
967          * Workaround: if the first ioceth enable failed, we may have
968          * read a zero MAC address. Try to get the MAC address
969          * again here.
970          */
971         if (is_zero_ether_addr(&bnad->perm_addr.mac[0])) {
972                 bna_enet_perm_mac_get(&bnad->bna.enet, &bnad->perm_addr);
973                 bnad_set_netdev_perm_addr(bnad);
974         }
975 }
976
977 static void
978 bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tx *tx)
979 {
980         struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv;
981         struct bna_tcb *tcb;
982         int i;
983
984         for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
985                 tcb = tx_info->tcb[i];
986                 if (!tcb)
987                         continue;
988         }
989
990         mdelay(BNAD_TXRX_SYNC_MDELAY);
991         bna_tx_cleanup_complete(tx);
992 }
993
994 static void
995 bnad_cb_rx_cleanup(struct bnad *bnad, struct bna_rx *rx)
996 {
997         struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
998         struct bna_ccb *ccb;
999         struct bnad_rx_ctrl *rx_ctrl;
1000         int i;
1001
1002         mdelay(BNAD_TXRX_SYNC_MDELAY);
1003
1004         for (i = 0; i < BNAD_MAX_RXPS_PER_RX; i++) {
1005                 rx_ctrl = &rx_info->rx_ctrl[i];
1006                 ccb = rx_ctrl->ccb;
1007                 if (!ccb)
1008                         continue;
1009
1010                 clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags);
1011
1012                 if (ccb->rcb[1])
1013                         clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[1]->flags);
1014
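                /* With RXQ_STARTED cleared, wait for any poll still inside
                 * bnad_poll_cq() to leave the Rx path before tearing down.
                 */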
1015                 while (test_bit(BNAD_FP_IN_RX_PATH, &rx_ctrl->flags))
1016                         cpu_relax();
1017         }
1018
1019         bna_rx_cleanup_complete(rx);
1020 }
1021
1022 static void
1023 bnad_cb_rx_post(struct bnad *bnad, struct bna_rx *rx)
1024 {
1025         struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
1026         struct bna_ccb *ccb;
1027         struct bna_rcb *rcb;
1028         struct bnad_rx_ctrl *rx_ctrl;
1029         struct bnad_unmap_q *unmap_q;
1030         int i;
1031         int j;
1032
1033         for (i = 0; i < BNAD_MAX_RXPS_PER_RX; i++) {
1034                 rx_ctrl = &rx_info->rx_ctrl[i];
1035                 ccb = rx_ctrl->ccb;
1036                 if (!ccb)
1037                         continue;
1038
1039                 bnad_cq_cmpl_init(bnad, ccb);
1040
1041                 for (j = 0; j < BNAD_MAX_RXQ_PER_RXP; j++) {
1042                         rcb = ccb->rcb[j];
1043                         if (!rcb)
1044                                 continue;
1045                         bnad_free_all_rxbufs(bnad, rcb);
1046
1047                         set_bit(BNAD_RXQ_STARTED, &rcb->flags);
1048                         unmap_q = rcb->unmap_q;
1049
1050                         /* Now allocate & post buffers for this RCB */
1051                         /* !!Allocation in callback context */
1052                         if (!test_and_set_bit(BNAD_RXQ_REFILL, &rcb->flags)) {
1053                                 if (BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth)
1054                                         >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT)
1055                                         bnad_alloc_n_post_rxbufs(bnad, rcb);
1056                                 smp_mb__before_clear_bit();
1057                                 clear_bit(BNAD_RXQ_REFILL, &rcb->flags);
1058                         }
1059                 }
1060         }
1061 }
1062
1063 static void
1064 bnad_cb_rx_disabled(void *arg, struct bna_rx *rx)
1065 {
1066         struct bnad *bnad = (struct bnad *)arg;
1067
1068         complete(&bnad->bnad_completions.rx_comp);
1069 }
1070
1071 static void
1072 bnad_cb_rx_mcast_add(struct bnad *bnad, struct bna_rx *rx)
1073 {
1074         bnad->bnad_completions.mcast_comp_status = BNA_CB_SUCCESS;
1075         complete(&bnad->bnad_completions.mcast_comp);
1076 }
1077
1078 void
1079 bnad_cb_stats_get(struct bnad *bnad, enum bna_cb_status status,
1080                        struct bna_stats *stats)
1081 {
1082         if (status == BNA_CB_SUCCESS)
1083                 BNAD_UPDATE_CTR(bnad, hw_stats_updates);
1084
1085         if (!netif_running(bnad->netdev) ||
1086                 !test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1087                 return;
1088
1089         mod_timer(&bnad->stats_timer,
1090                   jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
1091 }
1092
1093 static void
1094 bnad_cb_enet_mtu_set(struct bnad *bnad)
1095 {
1096         bnad->bnad_completions.mtu_comp_status = BNA_CB_SUCCESS;
1097         complete(&bnad->bnad_completions.mtu_comp);
1098 }
1099
1100 /* Resource allocation, free functions */
1101
1102 static void
1103 bnad_mem_free(struct bnad *bnad,
1104               struct bna_mem_info *mem_info)
1105 {
1106         int i;
1107         dma_addr_t dma_pa;
1108
1109         if (mem_info->mdl == NULL)
1110                 return;
1111
1112         for (i = 0; i < mem_info->num; i++) {
1113                 if (mem_info->mdl[i].kva != NULL) {
1114                         if (mem_info->mem_type == BNA_MEM_T_DMA) {
1115                                 BNA_GET_DMA_ADDR(&(mem_info->mdl[i].dma),
1116                                                 dma_pa);
1117                                 dma_free_coherent(&bnad->pcidev->dev,
1118                                                   mem_info->mdl[i].len,
1119                                                   mem_info->mdl[i].kva, dma_pa);
1120                         } else
1121                                 kfree(mem_info->mdl[i].kva);
1122                 }
1123         }
1124         kfree(mem_info->mdl);
1125         mem_info->mdl = NULL;
1126 }
1127
1128 static int
1129 bnad_mem_alloc(struct bnad *bnad,
1130                struct bna_mem_info *mem_info)
1131 {
1132         int i;
1133         dma_addr_t dma_pa;
1134
1135         if ((mem_info->num == 0) || (mem_info->len == 0)) {
1136                 mem_info->mdl = NULL;
1137                 return 0;
1138         }
1139
1140         mem_info->mdl = kcalloc(mem_info->num, sizeof(struct bna_mem_descr),
1141                                 GFP_KERNEL);
1142         if (mem_info->mdl == NULL)
1143                 return -ENOMEM;
1144
1145         if (mem_info->mem_type == BNA_MEM_T_DMA) {
1146                 for (i = 0; i < mem_info->num; i++) {
1147                         mem_info->mdl[i].len = mem_info->len;
1148                         mem_info->mdl[i].kva =
1149                                 dma_alloc_coherent(&bnad->pcidev->dev,
1150                                                 mem_info->len, &dma_pa,
1151                                                 GFP_KERNEL);
1152
1153                         if (mem_info->mdl[i].kva == NULL)
1154                                 goto err_return;
1155
1156                         BNA_SET_DMA_ADDR(dma_pa,
1157                                          &(mem_info->mdl[i].dma));
1158                 }
1159         } else {
1160                 for (i = 0; i < mem_info->num; i++) {
1161                         mem_info->mdl[i].len = mem_info->len;
1162                         mem_info->mdl[i].kva = kzalloc(mem_info->len,
1163                                                         GFP_KERNEL);
1164                         if (mem_info->mdl[i].kva == NULL)
1165                                 goto err_return;
1166                 }
1167         }
1168
1169         return 0;
1170
1171 err_return:
1172         bnad_mem_free(bnad, mem_info);
1173         return -ENOMEM;
1174 }
1175
1176 /* Free IRQ for Mailbox */
1177 static void
1178 bnad_mbox_irq_free(struct bnad *bnad)
1179 {
1180         int irq;
1181         unsigned long flags;
1182
1183         spin_lock_irqsave(&bnad->bna_lock, flags);
1184         bnad_disable_mbox_irq(bnad);
1185         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1186
1187         irq = BNAD_GET_MBOX_IRQ(bnad);
1188         free_irq(irq, bnad);
1189 }
1190
1191 /*
1192  * Allocates IRQ for Mailbox, but keeps it disabled.
1193  * This will be enabled once we get the mbox enable callback
1194  * from bna
1195  */
1196 static int
1197 bnad_mbox_irq_alloc(struct bnad *bnad)
1198 {
1199         int             err = 0;
1200         unsigned long   irq_flags, flags;
1201         u32     irq;
1202         irq_handler_t   irq_handler;
1203
1204         spin_lock_irqsave(&bnad->bna_lock, flags);
1205         if (bnad->cfg_flags & BNAD_CF_MSIX) {
1206                 irq_handler = (irq_handler_t)bnad_msix_mbox_handler;
1207                 irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector;
1208                 irq_flags = 0;
1209         } else {
1210                 irq_handler = (irq_handler_t)bnad_isr;
1211                 irq = bnad->pcidev->irq;
1212                 irq_flags = IRQF_SHARED;
1213         }
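        /*
         * In INTx mode a single PCI line carries both mailbox and data
         * interrupts, so the full bnad_isr() is installed and the line is
         * requested with IRQF_SHARED.
         */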
1214
1215         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1216         sprintf(bnad->mbox_irq_name, "%s", BNAD_NAME);
1217
1218         /*
1219          * Set the Mbox IRQ disable flag, so that the IRQ handler
1220          * called from request_irq() for SHARED IRQs does not execute
1221          */
1222         set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
1223
1224         BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
1225
1226         err = request_irq(irq, irq_handler, irq_flags,
1227                           bnad->mbox_irq_name, bnad);
1228
1229         return err;
1230 }
1231
1232 static void
1233 bnad_txrx_irq_free(struct bnad *bnad, struct bna_intr_info *intr_info)
1234 {
1235         kfree(intr_info->idl);
1236         intr_info->idl = NULL;
1237 }
1238
1239 /* Allocates Interrupt Descriptor List for MSIX/INT-X vectors */
1240 static int
1241 bnad_txrx_irq_alloc(struct bnad *bnad, enum bnad_intr_source src,
1242                     u32 txrx_id, struct bna_intr_info *intr_info)
1243 {
1244         int i, vector_start = 0;
1245         u32 cfg_flags;
1246         unsigned long flags;
1247
1248         spin_lock_irqsave(&bnad->bna_lock, flags);
1249         cfg_flags = bnad->cfg_flags;
1250         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1251
1252         if (cfg_flags & BNAD_CF_MSIX) {
1253                 intr_info->intr_type = BNA_INTR_T_MSIX;
1254                 intr_info->idl = kcalloc(intr_info->num,
1255                                         sizeof(struct bna_intr_descr),
1256                                         GFP_KERNEL);
1257                 if (!intr_info->idl)
1258                         return -ENOMEM;
1259
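                /*
                 * MSIX vector layout: mailbox vector(s) first, then one
                 * vector per TxQ across all Tx objects, then one per RxP.
                 */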
1260                 switch (src) {
1261                 case BNAD_INTR_TX:
1262                         vector_start = BNAD_MAILBOX_MSIX_VECTORS + txrx_id;
1263                         break;
1264
1265                 case BNAD_INTR_RX:
1266                         vector_start = BNAD_MAILBOX_MSIX_VECTORS +
1267                                         (bnad->num_tx * bnad->num_txq_per_tx) +
1268                                         txrx_id;
1269                         break;
1270
1271                 default:
1272                         BUG();
1273                 }
1274
1275                 for (i = 0; i < intr_info->num; i++)
1276                         intr_info->idl[i].vector = vector_start + i;
1277         } else {
1278                 intr_info->intr_type = BNA_INTR_T_INTX;
1279                 intr_info->num = 1;
1280                 intr_info->idl = kcalloc(intr_info->num,
1281                                         sizeof(struct bna_intr_descr),
1282                                         GFP_KERNEL);
1283                 if (!intr_info->idl)
1284                         return -ENOMEM;
1285
1286                 switch (src) {
1287                 case BNAD_INTR_TX:
1288                         intr_info->idl[0].vector = BNAD_INTX_TX_IB_BITMASK;
1289                         break;
1290
1291                 case BNAD_INTR_RX:
1292                         intr_info->idl[0].vector = BNAD_INTX_RX_IB_BITMASK;
1293                         break;
1294                 }
1295         }
1296         return 0;
1297 }
1298
1299 /**
1300  * NOTE: Should be called for MSIX only
1301  * Unregisters Tx MSIX vector(s) from the kernel
1302  */
1303 static void
1304 bnad_tx_msix_unregister(struct bnad *bnad, struct bnad_tx_info *tx_info,
1305                         int num_txqs)
1306 {
1307         int i;
1308         int vector_num;
1309
1310         for (i = 0; i < num_txqs; i++) {
1311                 if (tx_info->tcb[i] == NULL)
1312                         continue;
1313
1314                 vector_num = tx_info->tcb[i]->intr_vector;
1315                 free_irq(bnad->msix_table[vector_num].vector, tx_info->tcb[i]);
1316         }
1317 }
1318
1319 /**
1320  * NOTE: Should be called for MSIX only
1321  * Registers Tx MSIX vector(s) and ISR(s), cookie with the kernel
1322  */
1323 static int
1324 bnad_tx_msix_register(struct bnad *bnad, struct bnad_tx_info *tx_info,
1325                         u32 tx_id, int num_txqs)
1326 {
1327         int i;
1328         int err;
1329         int vector_num;
1330
1331         for (i = 0; i < num_txqs; i++) {
1332                 vector_num = tx_info->tcb[i]->intr_vector;
1333                 sprintf(tx_info->tcb[i]->name, "%s TXQ %d", bnad->netdev->name,
1334                                 tx_id + tx_info->tcb[i]->id);
1335                 err = request_irq(bnad->msix_table[vector_num].vector,
1336                                   (irq_handler_t)bnad_msix_tx, 0,
1337                                   tx_info->tcb[i]->name,
1338                                   tx_info->tcb[i]);
1339                 if (err)
1340                         goto err_return;
1341         }
1342
1343         return 0;
1344
1345 err_return:
1346         if (i > 0)
1347                 bnad_tx_msix_unregister(bnad, tx_info, i);
1348         return -1;
1349 }
1350
1351 /**
1352  * NOTE: Should be called for MSIX only
1353  * Unregisters Rx MSIX vector(s) from the kernel
1354  */
1355 static void
1356 bnad_rx_msix_unregister(struct bnad *bnad, struct bnad_rx_info *rx_info,
1357                         int num_rxps)
1358 {
1359         int i;
1360         int vector_num;
1361
1362         for (i = 0; i < num_rxps; i++) {
1363                 if (rx_info->rx_ctrl[i].ccb == NULL)
1364                         continue;
1365
1366                 vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
1367                 free_irq(bnad->msix_table[vector_num].vector,
1368                          rx_info->rx_ctrl[i].ccb);
1369         }
1370 }
1371
1372 /**
1373  * NOTE: Should be called for MSIX only
1374  * Registers Rx MSIX vector(s) and ISR(s), cookie with the kernel
1375  */
1376 static int
1377 bnad_rx_msix_register(struct bnad *bnad, struct bnad_rx_info *rx_info,
1378                         u32 rx_id, int num_rxps)
1379 {
1380         int i;
1381         int err;
1382         int vector_num;
1383
1384         for (i = 0; i < num_rxps; i++) {
1385                 vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
1386                 sprintf(rx_info->rx_ctrl[i].ccb->name, "%s CQ %d",
1387                         bnad->netdev->name,
1388                         rx_id + rx_info->rx_ctrl[i].ccb->id);
1389                 err = request_irq(bnad->msix_table[vector_num].vector,
1390                                   (irq_handler_t)bnad_msix_rx, 0,
1391                                   rx_info->rx_ctrl[i].ccb->name,
1392                                   rx_info->rx_ctrl[i].ccb);
1393                 if (err)
1394                         goto err_return;
1395         }
1396
1397         return 0;
1398
1399 err_return:
1400         if (i > 0)
1401                 bnad_rx_msix_unregister(bnad, rx_info, i);
1402         return -1;
1403 }
1404
1405 /* Free Tx object Resources */
1406 static void
1407 bnad_tx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
1408 {
1409         int i;
1410
1411         for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
1412                 if (res_info[i].res_type == BNA_RES_T_MEM)
1413                         bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
1414                 else if (res_info[i].res_type == BNA_RES_T_INTR)
1415                         bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
1416         }
1417 }
1418
1419 /* Allocates memory and interrupt resources for Tx object */
1420 static int
1421 bnad_tx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
1422                   u32 tx_id)
1423 {
1424         int i, err = 0;
1425
1426         for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
1427                 if (res_info[i].res_type == BNA_RES_T_MEM)
1428                         err = bnad_mem_alloc(bnad,
1429                                         &res_info[i].res_u.mem_info);
1430                 else if (res_info[i].res_type == BNA_RES_T_INTR)
1431                         err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_TX, tx_id,
1432                                         &res_info[i].res_u.intr_info);
1433                 if (err)
1434                         goto err_return;
1435         }
1436         return 0;
1437
1438 err_return:
1439         bnad_tx_res_free(bnad, res_info);
1440         return err;
1441 }
1442
1443 /* Free Rx object Resources */
1444 static void
1445 bnad_rx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
1446 {
1447         int i;
1448
1449         for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
1450                 if (res_info[i].res_type == BNA_RES_T_MEM)
1451                         bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
1452                 else if (res_info[i].res_type == BNA_RES_T_INTR)
1453                         bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
1454         }
1455 }
1456
1457 /* Allocates memory and interrupt resources for Rx object */
1458 static int
1459 bnad_rx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
1460                   uint rx_id)
1461 {
1462         int i, err = 0;
1463
1464         /* All memory needs to be allocated before setup_ccbs */
1465         for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
1466                 if (res_info[i].res_type == BNA_RES_T_MEM)
1467                         err = bnad_mem_alloc(bnad,
1468                                         &res_info[i].res_u.mem_info);
1469                 else if (res_info[i].res_type == BNA_RES_T_INTR)
1470                         err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_RX, rx_id,
1471                                         &res_info[i].res_u.intr_info);
1472                 if (err)
1473                         goto err_return;
1474         }
1475         return 0;
1476
1477 err_return:
1478         bnad_rx_res_free(bnad, res_info);
1479         return err;
1480 }
1481
1482 /* Timer callbacks */
1483 /* a) IOC timer */
1484 static void
1485 bnad_ioc_timeout(unsigned long data)
1486 {
1487         struct bnad *bnad = (struct bnad *)data;
1488         unsigned long flags;
1489
1490         spin_lock_irqsave(&bnad->bna_lock, flags);
1491         bfa_nw_ioc_timeout((void *) &bnad->bna.ioceth.ioc);
1492         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1493 }
1494
1495 static void
1496 bnad_ioc_hb_check(unsigned long data)
1497 {
1498         struct bnad *bnad = (struct bnad *)data;
1499         unsigned long flags;
1500
1501         spin_lock_irqsave(&bnad->bna_lock, flags);
1502         bfa_nw_ioc_hb_check((void *) &bnad->bna.ioceth.ioc);
1503         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1504 }
1505
1506 static void
1507 bnad_iocpf_timeout(unsigned long data)
1508 {
1509         struct bnad *bnad = (struct bnad *)data;
1510         unsigned long flags;
1511
1512         spin_lock_irqsave(&bnad->bna_lock, flags);
1513         bfa_nw_iocpf_timeout((void *) &bnad->bna.ioceth.ioc);
1514         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1515 }
1516
1517 static void
1518 bnad_iocpf_sem_timeout(unsigned long data)
1519 {
1520         struct bnad *bnad = (struct bnad *)data;
1521         unsigned long flags;
1522
1523         spin_lock_irqsave(&bnad->bna_lock, flags);
1524         bfa_nw_iocpf_sem_timeout((void *) &bnad->bna.ioceth.ioc);
1525         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1526 }
1527
1528 /*
1529  * All timer routines use bnad->bna_lock to protect against
1530  * the following race, which may occur in case of no locking:
1531  *      Time    CPU m   CPU n
1532  *      0       1 = test_bit
1533  *      1                       clear_bit
1534  *      2                       del_timer_sync
1535  *      3       mod_timer
1536  */
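/*
 * i.e. without the lock, CPU m could see the RUNNING bit at time 0 and
 * re-arm the timer at time 3, after CPU n has already cleared the bit
 * and called del_timer_sync(); taking bna_lock around both sequences
 * prevents that.
 */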
1537
1538 /* b) Dynamic Interrupt Moderation Timer */
1539 static void
1540 bnad_dim_timeout(unsigned long data)
1541 {
1542         struct bnad *bnad = (struct bnad *)data;
1543         struct bnad_rx_info *rx_info;
1544         struct bnad_rx_ctrl *rx_ctrl;
1545         int i, j;
1546         unsigned long flags;
1547
1548         if (!netif_carrier_ok(bnad->netdev))
1549                 return;
1550
1551         spin_lock_irqsave(&bnad->bna_lock, flags);
1552         for (i = 0; i < bnad->num_rx; i++) {
1553                 rx_info = &bnad->rx_info[i];
1554                 if (!rx_info->rx)
1555                         continue;
1556                 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
1557                         rx_ctrl = &rx_info->rx_ctrl[j];
1558                         if (!rx_ctrl->ccb)
1559                                 continue;
1560                         bna_rx_dim_update(rx_ctrl->ccb);
1561                 }
1562         }
1563
1564         /* Check for BNAD_CF_DIM_ENABLED, does not eliminate a race */
1565         if (test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags))
1566                 mod_timer(&bnad->dim_timer,
1567                           jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
1568         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1569 }
1570
1571 /* c)  Statistics Timer */
1572 static void
1573 bnad_stats_timeout(unsigned long data)
1574 {
1575         struct bnad *bnad = (struct bnad *)data;
1576         unsigned long flags;
1577
1578         if (!netif_running(bnad->netdev) ||
1579                 !test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1580                 return;
1581
1582         spin_lock_irqsave(&bnad->bna_lock, flags);
1583         bna_hw_stats_get(&bnad->bna);
1584         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1585 }
1586
1587 /*
1588  * Set up timer for DIM
1589  * Called with bnad->bna_lock held
1590  */
1591 void
1592 bnad_dim_timer_start(struct bnad *bnad)
1593 {
1594         if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
1595             !test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
1596                 setup_timer(&bnad->dim_timer, bnad_dim_timeout,
1597                             (unsigned long)bnad);
1598                 set_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
1599                 mod_timer(&bnad->dim_timer,
1600                           jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
1601         }
1602 }
1603
1604 /*
1605  * Set up timer for statistics
1606  * Called with mutex_lock(&bnad->conf_mutex) held
1607  */
1608 static void
1609 bnad_stats_timer_start(struct bnad *bnad)
1610 {
1611         unsigned long flags;
1612
1613         spin_lock_irqsave(&bnad->bna_lock, flags);
1614         if (!test_and_set_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags)) {
1615                 setup_timer(&bnad->stats_timer, bnad_stats_timeout,
1616                             (unsigned long)bnad);
1617                 mod_timer(&bnad->stats_timer,
1618                           jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
1619         }
1620         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1621 }
1622
1623 /*
1624  * Stops the stats timer
1625  * Called with mutex_lock(&bnad->conf_mutex) held
1626  */
1627 static void
1628 bnad_stats_timer_stop(struct bnad *bnad)
1629 {
1630         int to_del = 0;
1631         unsigned long flags;
1632
1633         spin_lock_irqsave(&bnad->bna_lock, flags);
1634         if (test_and_clear_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1635                 to_del = 1;
1636         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1637         if (to_del)
1638                 del_timer_sync(&bnad->stats_timer);
1639 }
1640
1641 /* Utilities */
1642
1643 static void
1644 bnad_netdev_mc_list_get(struct net_device *netdev, u8 *mc_list)
1645 {
1646         int i = 1; /* Index 0 has broadcast address */
1647         struct netdev_hw_addr *mc_addr;
1648
1649         netdev_for_each_mc_addr(mc_addr, netdev) {
1650                 memcpy(&mc_list[i * ETH_ALEN], &mc_addr->addr[0],
1651                                                         ETH_ALEN);
1652                 i++;
1653         }
1654 }
1655
1656 static int
1657 bnad_napi_poll_rx(struct napi_struct *napi, int budget)
1658 {
1659         struct bnad_rx_ctrl *rx_ctrl =
1660                 container_of(napi, struct bnad_rx_ctrl, napi);
1661         struct bna_ccb *ccb;
1662         struct bnad *bnad;
1663         int rcvd = 0;
1664
1665         ccb = rx_ctrl->ccb;
1666
1667         bnad = ccb->bnad;
1668
1669         if (!netif_carrier_ok(bnad->netdev))
1670                 goto poll_exit;
1671
1672         rcvd = bnad_poll_cq(bnad, ccb, budget);
1673         if (rcvd == budget)
1674                 return rcvd;
1675
1676 poll_exit:
1677         napi_complete(napi);
1678
1679         BNAD_UPDATE_CTR(bnad, netif_rx_complete);
1680
1681         bnad_enable_rx_irq(bnad, ccb);
1682         return rcvd;
1683 }
1684
1685 static void
1686 bnad_napi_enable(struct bnad *bnad, u32 rx_id)
1687 {
1688         struct bnad_rx_ctrl *rx_ctrl;
1689         int i;
1690
1691         /* Initialize & enable NAPI */
1692         for (i = 0; i < bnad->num_rxp_per_rx; i++) {
1693                 rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i];
1694
1695                 netif_napi_add(bnad->netdev, &rx_ctrl->napi,
1696                                bnad_napi_poll_rx, 64);
1697
1698                 napi_enable(&rx_ctrl->napi);
1699         }
1700 }
1701
1702 static void
1703 bnad_napi_disable(struct bnad *bnad, u32 rx_id)
1704 {
1705         int i;
1706
1707         /* First disable and then clean up */
1708         for (i = 0; i < bnad->num_rxp_per_rx; i++) {
1709                 napi_disable(&bnad->rx_info[rx_id].rx_ctrl[i].napi);
1710                 netif_napi_del(&bnad->rx_info[rx_id].rx_ctrl[i].napi);
1711         }
1712 }
1713
1714 /* Should be called with conf_lock held */
1715 void
1716 bnad_cleanup_tx(struct bnad *bnad, u32 tx_id)
1717 {
1718         struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
1719         struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
1720         unsigned long flags;
1721
1722         if (!tx_info->tx)
1723                 return;
1724
1725         init_completion(&bnad->bnad_completions.tx_comp);
1726         spin_lock_irqsave(&bnad->bna_lock, flags);
1727         bna_tx_disable(tx_info->tx, BNA_HARD_CLEANUP, bnad_cb_tx_disabled);
1728         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1729         wait_for_completion(&bnad->bnad_completions.tx_comp);
1730
1731         if (tx_info->tcb[0]->intr_type == BNA_INTR_T_MSIX)
1732                 bnad_tx_msix_unregister(bnad, tx_info,
1733                         bnad->num_txq_per_tx);
1734
1735         spin_lock_irqsave(&bnad->bna_lock, flags);
1736         bna_tx_destroy(tx_info->tx);
1737         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1738
1739         tx_info->tx = NULL;
1740         tx_info->tx_id = 0;
1741
1742         if (0 == tx_id)
1743                 tasklet_kill(&bnad->tx_free_tasklet);
1744
1745         bnad_tx_res_free(bnad, res_info);
1746 }
1747
1748 /* Should be called with conf_lock held */
1749 int
1750 bnad_setup_tx(struct bnad *bnad, u32 tx_id)
1751 {
1752         int err;
1753         struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
1754         struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
1755         struct bna_intr_info *intr_info =
1756                         &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
1757         struct bna_tx_config *tx_config = &bnad->tx_config[tx_id];
1758         struct bna_tx_event_cbfn tx_cbfn;
1759         struct bna_tx *tx;
1760         unsigned long flags;
1761
1762         tx_info->tx_id = tx_id;
1763
1764         /* Initialize the Tx object configuration */
1765         tx_config->num_txq = bnad->num_txq_per_tx;
1766         tx_config->txq_depth = bnad->txq_depth;
1767         tx_config->tx_type = BNA_TX_T_REGULAR;
1768         tx_config->coalescing_timeo = bnad->tx_coalescing_timeo;
1769
1770         /* Initialize the tx event handlers */
1771         tx_cbfn.tcb_setup_cbfn = bnad_cb_tcb_setup;
1772         tx_cbfn.tcb_destroy_cbfn = bnad_cb_tcb_destroy;
1773         tx_cbfn.tx_stall_cbfn = bnad_cb_tx_stall;
1774         tx_cbfn.tx_resume_cbfn = bnad_cb_tx_resume;
1775         tx_cbfn.tx_cleanup_cbfn = bnad_cb_tx_cleanup;
1776
1777         /* Get BNA's resource requirement for one tx object */
1778         spin_lock_irqsave(&bnad->bna_lock, flags);
1779         bna_tx_res_req(bnad->num_txq_per_tx,
1780                 bnad->txq_depth, res_info);
1781         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1782
1783         /* Fill Unmap Q memory requirements */
1784         BNAD_FILL_UNMAPQ_MEM_REQ(
1785                         &res_info[BNA_TX_RES_MEM_T_UNMAPQ],
1786                         bnad->num_txq_per_tx,
1787                         BNAD_TX_UNMAPQ_DEPTH);
1788
1789         /* Allocate resources */
1790         err = bnad_tx_res_alloc(bnad, res_info, tx_id);
1791         if (err)
1792                 return err;
1793
1794         /* Ask BNA to create one Tx object, supplying required resources */
1795         spin_lock_irqsave(&bnad->bna_lock, flags);
1796         tx = bna_tx_create(&bnad->bna, bnad, tx_config, &tx_cbfn, res_info,
1797                         tx_info);
1798         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1799         if (!tx)
1800                 goto err_return;
1801         tx_info->tx = tx;
1802
1803         /* Register ISR for the Tx object */
1804         if (intr_info->intr_type == BNA_INTR_T_MSIX) {
1805                 err = bnad_tx_msix_register(bnad, tx_info,
1806                         tx_id, bnad->num_txq_per_tx);
1807                 if (err)
1808                         goto err_return;
1809         }
1810
1811         spin_lock_irqsave(&bnad->bna_lock, flags);
1812         bna_tx_enable(tx);
1813         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1814
1815         return 0;
1816
1817 err_return:
1818         bnad_tx_res_free(bnad, res_info);
1819         return err;
1820 }
1821
1822 /* Setup the rx config for bna_rx_create */
1823 /* bnad decides the configuration */
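/*
 * With more than one Rx path, RSS is enabled with a random Toeplitz key
 * and a hash mask of (num_rxp_per_rx - 1); otherwise RSS is disabled.
 * VLAN stripping is enabled in either case.
 */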
1824 static void
1825 bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config)
1826 {
1827         rx_config->rx_type = BNA_RX_T_REGULAR;
1828         rx_config->num_paths = bnad->num_rxp_per_rx;
1829         rx_config->coalescing_timeo = bnad->rx_coalescing_timeo;
1830
1831         if (bnad->num_rxp_per_rx > 1) {
1832                 rx_config->rss_status = BNA_STATUS_T_ENABLED;
1833                 rx_config->rss_config.hash_type =
1834                                 (BFI_ENET_RSS_IPV6 |
1835                                  BFI_ENET_RSS_IPV6_TCP |
1836                                  BFI_ENET_RSS_IPV4 |
1837                                  BFI_ENET_RSS_IPV4_TCP);
1838                 rx_config->rss_config.hash_mask =
1839                                 bnad->num_rxp_per_rx - 1;
1840                 get_random_bytes(rx_config->rss_config.toeplitz_hash_key,
1841                         sizeof(rx_config->rss_config.toeplitz_hash_key));
1842         } else {
1843                 rx_config->rss_status = BNA_STATUS_T_DISABLED;
1844                 memset(&rx_config->rss_config, 0,
1845                        sizeof(rx_config->rss_config));
1846         }
1847         rx_config->rxp_type = BNA_RXP_SLR;
1848         rx_config->q_depth = bnad->rxq_depth;
1849
1850         rx_config->small_buff_size = BFI_SMALL_RXBUF_SIZE;
1851
1852         rx_config->vlan_strip_status = BNA_STATUS_T_ENABLED;
1853 }
1854
1855 /* Called with mutex_lock(&bnad->conf_mutex) held */
1856 void
1857 bnad_cleanup_rx(struct bnad *bnad, u32 rx_id)
1858 {
1859         struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
1860         struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
1861         struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
1862         unsigned long flags;
1863         int dim_timer_del = 0;
1864
1865         if (!rx_info->rx)
1866                 return;
1867
1868         if (0 == rx_id) {
1869                 spin_lock_irqsave(&bnad->bna_lock, flags);
1870                 dim_timer_del = bnad_dim_timer_running(bnad);
1871                 if (dim_timer_del)
1872                         clear_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
1873                 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1874                 if (dim_timer_del)
1875                         del_timer_sync(&bnad->dim_timer);
1876         }
1877
1878         bnad_napi_disable(bnad, rx_id);
1879
1880         init_completion(&bnad->bnad_completions.rx_comp);
1881         spin_lock_irqsave(&bnad->bna_lock, flags);
1882         bna_rx_disable(rx_info->rx, BNA_HARD_CLEANUP, bnad_cb_rx_disabled);
1883         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1884         wait_for_completion(&bnad->bnad_completions.rx_comp);
1885
1886         if (rx_info->rx_ctrl[0].ccb->intr_type == BNA_INTR_T_MSIX)
1887                 bnad_rx_msix_unregister(bnad, rx_info, rx_config->num_paths);
1888
1889         spin_lock_irqsave(&bnad->bna_lock, flags);
1890         bna_rx_destroy(rx_info->rx);
1891         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1892
1893         rx_info->rx = NULL;
1894
1895         bnad_rx_res_free(bnad, res_info);
1896 }
1897
1898 /* Called with mutex_lock(&bnad->conf_mutex) held */
1899 int
1900 bnad_setup_rx(struct bnad *bnad, u32 rx_id)
1901 {
1902         int err;
1903         struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
1904         struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
1905         struct bna_intr_info *intr_info =
1906                         &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
1907         struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
1908         struct bna_rx_event_cbfn rx_cbfn;
1909         struct bna_rx *rx;
1910         unsigned long flags;
1911
1912         rx_info->rx_id = rx_id;
1913
1914         /* Initialize the Rx object configuration */
1915         bnad_init_rx_config(bnad, rx_config);
1916
1917         /* Initialize the Rx event handlers */
1918         rx_cbfn.rcb_setup_cbfn = bnad_cb_rcb_setup;
1919         rx_cbfn.rcb_destroy_cbfn = bnad_cb_rcb_destroy;
1920         rx_cbfn.ccb_setup_cbfn = bnad_cb_ccb_setup;
1921         rx_cbfn.ccb_destroy_cbfn = bnad_cb_ccb_destroy;
1922         rx_cbfn.rx_cleanup_cbfn = bnad_cb_rx_cleanup;
1923         rx_cbfn.rx_post_cbfn = bnad_cb_rx_post;
1924
1925         /* Get BNA's resource requirement for one Rx object */
1926         spin_lock_irqsave(&bnad->bna_lock, flags);
1927         bna_rx_res_req(rx_config, res_info);
1928         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1929
1930         /* Fill Unmap Q memory requirements */
1931         BNAD_FILL_UNMAPQ_MEM_REQ(
1932                         &res_info[BNA_RX_RES_MEM_T_UNMAPQ],
1933                         rx_config->num_paths +
1934                         ((rx_config->rxp_type == BNA_RXP_SINGLE) ? 0 :
1935                                 rx_config->num_paths), BNAD_RX_UNMAPQ_DEPTH);
1936
1937         /* Allocate resource */
1938         err = bnad_rx_res_alloc(bnad, res_info, rx_id);
1939         if (err)
1940                 return err;
1941
1942         /* Ask BNA to create one Rx object, supplying required resources */
1943         spin_lock_irqsave(&bnad->bna_lock, flags);
1944         rx = bna_rx_create(&bnad->bna, bnad, rx_config, &rx_cbfn, res_info,
1945                         rx_info);
1946         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1947         if (!rx)
1948                 goto err_return;
1949         rx_info->rx = rx;
1950
1951         /* Register ISR for the Rx object */
1952         if (intr_info->intr_type == BNA_INTR_T_MSIX) {
1953                 err = bnad_rx_msix_register(bnad, rx_info, rx_id,
1954                                                 rx_config->num_paths);
1955                 if (err)
1956                         goto err_return;
1957         }
1958
1959         /* Enable NAPI */
1960         bnad_napi_enable(bnad, rx_id);
1961
1962         spin_lock_irqsave(&bnad->bna_lock, flags);
1963         if (0 == rx_id) {
1964                 /* Set up Dynamic Interrupt Moderation Vector */
1965                 if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED)
1966                         bna_rx_dim_reconfig(&bnad->bna, bna_napi_dim_vector);
1967
1968                 /* Enable VLAN filtering only on the default Rx */
1969                 bna_rx_vlanfilter_enable(rx);
1970
1971                 /* Start the DIM timer */
1972                 bnad_dim_timer_start(bnad);
1973         }
1974
1975         bna_rx_enable(rx);
1976         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1977
1978         return 0;
1979
1980 err_return:
1981         bnad_cleanup_rx(bnad, rx_id);
1982         return err;
1983 }
1984
1985 /* Called with conf_lock & bnad->bna_lock held */
1986 void
1987 bnad_tx_coalescing_timeo_set(struct bnad *bnad)
1988 {
1989         struct bnad_tx_info *tx_info;
1990
1991         tx_info = &bnad->tx_info[0];
1992         if (!tx_info->tx)
1993                 return;
1994
1995         bna_tx_coalescing_timeo_set(tx_info->tx, bnad->tx_coalescing_timeo);
1996 }
1997
1998 /* Called with conf_lock & bnad->bna_lock held */
1999 void
2000 bnad_rx_coalescing_timeo_set(struct bnad *bnad)
2001 {
2002         struct bnad_rx_info *rx_info;
2003         int     i;
2004
2005         for (i = 0; i < bnad->num_rx; i++) {
2006                 rx_info = &bnad->rx_info[i];
2007                 if (!rx_info->rx)
2008                         continue;
2009                 bna_rx_coalescing_timeo_set(rx_info->rx,
2010                                 bnad->rx_coalescing_timeo);
2011         }
2012 }
2013
2014 /*
2015  * Called with bnad->bna_lock held
2016  */
2017 static int
2018 bnad_mac_addr_set_locked(struct bnad *bnad, u8 *mac_addr)
2019 {
2020         int ret;
2021
2022         if (!is_valid_ether_addr(mac_addr))
2023                 return -EADDRNOTAVAIL;
2024
2025         /* If datapath is down, pretend everything went through */
2026         if (!bnad->rx_info[0].rx)
2027                 return 0;
2028
2029         ret = bna_rx_ucast_set(bnad->rx_info[0].rx, mac_addr, NULL);
2030         if (ret != BNA_CB_SUCCESS)
2031                 return -EADDRNOTAVAIL;
2032
2033         return 0;
2034 }
2035
2036 /* Should be called with conf_lock held */
2037 static int
2038 bnad_enable_default_bcast(struct bnad *bnad)
2039 {
2040         struct bnad_rx_info *rx_info = &bnad->rx_info[0];
2041         int ret;
2042         unsigned long flags;
2043
2044         init_completion(&bnad->bnad_completions.mcast_comp);
2045
2046         spin_lock_irqsave(&bnad->bna_lock, flags);
2047         ret = bna_rx_mcast_add(rx_info->rx, (u8 *)bnad_bcast_addr,
2048                                 bnad_cb_rx_mcast_add);
2049         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2050
2051         if (ret == BNA_CB_SUCCESS)
2052                 wait_for_completion(&bnad->bnad_completions.mcast_comp);
2053         else
2054                 return -ENODEV;
2055
2056         if (bnad->bnad_completions.mcast_comp_status != BNA_CB_SUCCESS)
2057                 return -ENODEV;
2058
2059         return 0;
2060 }
2061
2062 /* Called with bnad_conf_lock() held */
2063 static void
2064 bnad_restore_vlans(struct bnad *bnad, u32 rx_id)
2065 {
2066         u16 vid;
2067         unsigned long flags;
2068
2069         BUG_ON(!(VLAN_N_VID == BFI_ENET_VLAN_ID_MAX));
2070
2071         for_each_set_bit(vid, bnad->active_vlans, VLAN_N_VID) {
2072                 spin_lock_irqsave(&bnad->bna_lock, flags);
2073                 bna_rx_vlan_add(bnad->rx_info[rx_id].rx, vid);
2074                 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2075         }
2076 }
2077
2078 /* Statistics utilities */
2079 void
2080 bnad_netdev_qstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
2081 {
2082         int i, j;
2083
2084         for (i = 0; i < bnad->num_rx; i++) {
2085                 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
2086                         if (bnad->rx_info[i].rx_ctrl[j].ccb) {
2087                                 stats->rx_packets += bnad->rx_info[i].
2088                                 rx_ctrl[j].ccb->rcb[0]->rxq->rx_packets;
2089                                 stats->rx_bytes += bnad->rx_info[i].
2090                                         rx_ctrl[j].ccb->rcb[0]->rxq->rx_bytes;
2091                                 if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] &&
2092                                         bnad->rx_info[i].rx_ctrl[j].ccb->
2093                                         rcb[1]->rxq) {
2094                                         stats->rx_packets +=
2095                                                 bnad->rx_info[i].rx_ctrl[j].
2096                                                 ccb->rcb[1]->rxq->rx_packets;
2097                                         stats->rx_bytes +=
2098                                                 bnad->rx_info[i].rx_ctrl[j].
2099                                                 ccb->rcb[1]->rxq->rx_bytes;
2100                                 }
2101                         }
2102                 }
2103         }
2104         for (i = 0; i < bnad->num_tx; i++) {
2105                 for (j = 0; j < bnad->num_txq_per_tx; j++) {
2106                         if (bnad->tx_info[i].tcb[j]) {
2107                                 stats->tx_packets +=
2108                                 bnad->tx_info[i].tcb[j]->txq->tx_packets;
2109                                 stats->tx_bytes +=
2110                                         bnad->tx_info[i].tcb[j]->txq->tx_bytes;
2111                         }
2112                 }
2113         }
2114 }
2115
2116 /*
2117  * Must be called with the bna_lock held.
2118  */
2119 void
2120 bnad_netdev_hwstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
2121 {
2122         struct bfi_enet_stats_mac *mac_stats;
2123         u32 bmap;
2124         int i;
2125
2126         mac_stats = &bnad->stats.bna_stats->hw_stats.mac_stats;
2127         stats->rx_errors =
2128                 mac_stats->rx_fcs_error + mac_stats->rx_alignment_error +
2129                 mac_stats->rx_frame_length_error + mac_stats->rx_code_error +
2130                 mac_stats->rx_undersize;
2131         stats->tx_errors = mac_stats->tx_fcs_error +
2132                                         mac_stats->tx_undersize;
2133         stats->rx_dropped = mac_stats->rx_drop;
2134         stats->tx_dropped = mac_stats->tx_drop;
2135         stats->multicast = mac_stats->rx_multicast;
2136         stats->collisions = mac_stats->tx_total_collision;
2137
2138         stats->rx_length_errors = mac_stats->rx_frame_length_error;
2139
2140         /* receive ring buffer overflow  ?? */
2141
2142         stats->rx_crc_errors = mac_stats->rx_fcs_error;
2143         stats->rx_frame_errors = mac_stats->rx_alignment_error;
2144         /* receiver FIFO overrun */
2145         bmap = bna_rx_rid_mask(&bnad->bna);
2146         for (i = 0; bmap; i++) {
2147                 if (bmap & 1) {
2148                         stats->rx_fifo_errors +=
2149                                 bnad->stats.bna_stats->
2150                                         hw_stats.rxf_stats[i].frame_drops;
2151                         break;
2152                 }
2153                 bmap >>= 1;
2154         }
2155 }
2156
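/*
 * Wait for any in-flight mailbox interrupt handler to finish.
 * The vector is read under bna_lock since cfg_flags decides between
 * MSIX and INTx.
 */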
2157 static void
2158 bnad_mbox_irq_sync(struct bnad *bnad)
2159 {
2160         u32 irq;
2161         unsigned long flags;
2162
2163         spin_lock_irqsave(&bnad->bna_lock, flags);
2164         if (bnad->cfg_flags & BNAD_CF_MSIX)
2165                 irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector;
2166         else
2167                 irq = bnad->pcidev->irq;
2168         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2169
2170         synchronize_irq(irq);
2171 }
2172
2173 /* Utility used by bnad_start_xmit, for doing TSO */
2174 static int
2175 bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb)
2176 {
2177         int err;
2178
2179         /* SKB_GSO_TCPV4 and SKB_GSO_TCPV6 are defined since 2.6.18. */
2180         BUG_ON(!(skb_shinfo(skb)->gso_type == SKB_GSO_TCPV4 ||
2181                    skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6));
2182         if (skb_header_cloned(skb)) {
2183                 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2184                 if (err) {
2185                         BNAD_UPDATE_CTR(bnad, tso_err);
2186                         return err;
2187                 }
2188         }
2189
2190         /*
2191          * For TSO, the TCP checksum field is seeded with pseudo-header sum
2192          * excluding the length field.
2193          */
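        /*
         * Only the pseudo-header sum (without the length) is seeded here;
         * the adapter completes the checksum for each segment it generates.
         */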
2194         if (skb->protocol == htons(ETH_P_IP)) {
2195                 struct iphdr *iph = ip_hdr(skb);
2196
2197                 /* Do we really need these? */
2198                 iph->tot_len = 0;
2199                 iph->check = 0;
2200
2201                 tcp_hdr(skb)->check =
2202                         ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
2203                                            IPPROTO_TCP, 0);
2204                 BNAD_UPDATE_CTR(bnad, tso4);
2205         } else {
2206                 struct ipv6hdr *ipv6h = ipv6_hdr(skb);
2207
2208                 BUG_ON(!(skb->protocol == htons(ETH_P_IPV6)));
2209                 ipv6h->payload_len = 0;
2210                 tcp_hdr(skb)->check =
2211                         ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, 0,
2212                                          IPPROTO_TCP, 0);
2213                 BNAD_UPDATE_CTR(bnad, tso6);
2214         }
2215
2216         return 0;
2217 }
2218
2219 /*
2220  * Initialize Q numbers depending on Rx Paths
2221  * Called with bnad->bna_lock held, because of cfg_flags
2222  * access.
2223  */
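/*
 * e.g. with MSIX enabled and four online CPUs (below the
 * BNAD_MAX_RXS * BNAD_MAX_RXPS_PER_RX cap), this yields one Tx object
 * with BNAD_TXQ_NUM TxQs and one Rx object with four Rx paths.
 */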
2224 static void
2225 bnad_q_num_init(struct bnad *bnad)
2226 {
2227         int rxps;
2228
2229         rxps = min((uint)num_online_cpus(),
2230                         (uint)(BNAD_MAX_RXS * BNAD_MAX_RXPS_PER_RX));
2231
2232         if (!(bnad->cfg_flags & BNAD_CF_MSIX))
2233                 rxps = 1;       /* INTx */
2234
2235         bnad->num_rx = 1;
2236         bnad->num_tx = 1;
2237         bnad->num_rxp_per_rx = rxps;
2238         bnad->num_txq_per_tx = BNAD_TXQ_NUM;
2239 }
2240
2241 /*
2242  * Adjusts the Q numbers, given a number of msix vectors
2243  * Give preference to RSS over Tx priority queues; in such a case,
2244  * use just 1 Tx Q.
2245  * Called with bnad->bna_lock held because of cfg_flags access
2246  */
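/*
 * e.g. if 8 vectors were granted and num_tx == 1, num_rxp_per_rx
 * becomes 8 - 1 - BNAD_MAILBOX_MSIX_VECTORS, provided the check
 * below passes.
 */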
2247 static void
2248 bnad_q_num_adjust(struct bnad *bnad, int msix_vectors, int temp)
2249 {
2250         bnad->num_txq_per_tx = 1;
2251         if ((msix_vectors >= (bnad->num_tx * bnad->num_txq_per_tx)  +
2252              bnad_rxqs_per_cq + BNAD_MAILBOX_MSIX_VECTORS) &&
2253             (bnad->cfg_flags & BNAD_CF_MSIX)) {
2254                 bnad->num_rxp_per_rx = msix_vectors -
2255                         (bnad->num_tx * bnad->num_txq_per_tx) -
2256                         BNAD_MAILBOX_MSIX_VECTORS;
2257         } else
2258                 bnad->num_rxp_per_rx = 1;
2259 }
2260
2261 /* Enable / disable ioceth */
2262 static int
2263 bnad_ioceth_disable(struct bnad *bnad)
2264 {
2265         unsigned long flags;
2266         int err = 0;
2267
2268         spin_lock_irqsave(&bnad->bna_lock, flags);
2269         init_completion(&bnad->bnad_completions.ioc_comp);
2270         bna_ioceth_disable(&bnad->bna.ioceth, BNA_HARD_CLEANUP);
2271         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2272
2273         wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp,
2274                 msecs_to_jiffies(BNAD_IOCETH_TIMEOUT));
2275
2276         err = bnad->bnad_completions.ioc_comp_status;
2277         return err;
2278 }
2279
2280 static int
2281 bnad_ioceth_enable(struct bnad *bnad)
2282 {
2283         int err = 0;
2284         unsigned long flags;
2285
2286         spin_lock_irqsave(&bnad->bna_lock, flags);
2287         init_completion(&bnad->bnad_completions.ioc_comp);
2288         bnad->bnad_completions.ioc_comp_status = BNA_CB_WAITING;
2289         bna_ioceth_enable(&bnad->bna.ioceth);
2290         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2291
2292         wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp,
2293                 msecs_to_jiffies(BNAD_IOCETH_TIMEOUT));
2294
2295         err = bnad->bnad_completions.ioc_comp_status;
2296
2297         return err;
2298 }
2299
2300 /* Free BNA resources */
2301 static void
2302 bnad_res_free(struct bnad *bnad, struct bna_res_info *res_info,
2303                 u32 res_val_max)
2304 {
2305         int i;
2306
2307         for (i = 0; i < res_val_max; i++)
2308                 bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
2309 }
2310
2311 /* Allocates memory and interrupt resources for BNA */
2312 static int
2313 bnad_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
2314                 u32 res_val_max)
2315 {
2316         int i, err;
2317
2318         for (i = 0; i < res_val_max; i++) {
2319                 err = bnad_mem_alloc(bnad, &res_info[i].res_u.mem_info);
2320                 if (err)
2321                         goto err_return;
2322         }
2323         return 0;
2324
2325 err_return:
2326         bnad_res_free(bnad, res_info, res_val_max);
2327         return err;
2328 }
2329
2330 /* Interrupt enable / disable */
2331 static void
2332 bnad_enable_msix(struct bnad *bnad)
2333 {
2334         int i, ret;
2335         unsigned long flags;
2336
2337         spin_lock_irqsave(&bnad->bna_lock, flags);
2338         if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
2339                 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2340                 return;
2341         }
2342         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2343
2344         if (bnad->msix_table)
2345                 return;
2346
2347         bnad->msix_table =
2348                 kcalloc(bnad->msix_num, sizeof(struct msix_entry), GFP_KERNEL);
2349
2350         if (!bnad->msix_table)
2351                 goto intx_mode;
2352
2353         for (i = 0; i < bnad->msix_num; i++)
2354                 bnad->msix_table[i].entry = i;
2355
2356         ret = pci_enable_msix(bnad->pcidev, bnad->msix_table, bnad->msix_num);
2357         if (ret > 0) {
2358                 /* Not enough MSI-X vectors. */
2359
2360                 spin_lock_irqsave(&bnad->bna_lock, flags);
2361                 /* ret = #of vectors that we got */
2362                 bnad_q_num_adjust(bnad, ret, 0);
2363                 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2364
2365                 bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx)
2366                         + (bnad->num_rx
2367                         * bnad->num_rxp_per_rx) +
2368                          BNAD_MAILBOX_MSIX_VECTORS;
2369
2370                 if (bnad->msix_num > ret)
2371                         goto intx_mode;
2372
2373                 /* Try once more with adjusted numbers */
2374                 /* If this fails, fall back to INTx */
2375                 ret = pci_enable_msix(bnad->pcidev, bnad->msix_table,
2376                                       bnad->msix_num);
2377                 if (ret)
2378                         goto intx_mode;
2379
2380         } else if (ret < 0)
2381                 goto intx_mode;
2382
2383         pci_intx(bnad->pcidev, 0);
2384
2385         return;
2386
2387 intx_mode:
2388
2389         kfree(bnad->msix_table);
2390         bnad->msix_table = NULL;
2391         bnad->msix_num = 0;
2392         spin_lock_irqsave(&bnad->bna_lock, flags);
2393         bnad->cfg_flags &= ~BNAD_CF_MSIX;
2394         bnad_q_num_init(bnad);
2395         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2396 }
2397
2398 static void
2399 bnad_disable_msix(struct bnad *bnad)
2400 {
2401         u32 cfg_flags;
2402         unsigned long flags;
2403
2404         spin_lock_irqsave(&bnad->bna_lock, flags);
2405         cfg_flags = bnad->cfg_flags;
2406         if (bnad->cfg_flags & BNAD_CF_MSIX)
2407                 bnad->cfg_flags &= ~BNAD_CF_MSIX;
2408         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2409
2410         if (cfg_flags & BNAD_CF_MSIX) {
2411                 pci_disable_msix(bnad->pcidev);
2412                 kfree(bnad->msix_table);
2413                 bnad->msix_table = NULL;
2414         }
2415 }
2416
2417 /* Netdev entry points */
2418 static int
2419 bnad_open(struct net_device *netdev)
2420 {
2421         int err;
2422         struct bnad *bnad = netdev_priv(netdev);
2423         struct bna_pause_config pause_config;
2424         int mtu;
2425         unsigned long flags;
2426
2427         mutex_lock(&bnad->conf_mutex);
2428
2429         /* Tx */
2430         err = bnad_setup_tx(bnad, 0);
2431         if (err)
2432                 goto err_return;
2433
2434         /* Rx */
2435         err = bnad_setup_rx(bnad, 0);
2436         if (err)
2437                 goto cleanup_tx;
2438
2439         /* Port */
2440         pause_config.tx_pause = 0;
2441         pause_config.rx_pause = 0;
2442
2443         mtu = ETH_HLEN + VLAN_HLEN + bnad->netdev->mtu + ETH_FCS_LEN;
2444
2445         spin_lock_irqsave(&bnad->bna_lock, flags);
2446         bna_enet_mtu_set(&bnad->bna.enet, mtu, NULL);
2447         bna_enet_pause_config(&bnad->bna.enet, &pause_config, NULL);
2448         bna_enet_enable(&bnad->bna.enet);
2449         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2450
2451         /* Enable broadcast */
2452         bnad_enable_default_bcast(bnad);
2453
2454         /* Restore VLANs, if any */
2455         bnad_restore_vlans(bnad, 0);
2456
2457         /* Set the UCAST address */
2458         spin_lock_irqsave(&bnad->bna_lock, flags);
2459         bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
2460         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2461
2462         /* Start the stats timer */
2463         bnad_stats_timer_start(bnad);
2464
2465         mutex_unlock(&bnad->conf_mutex);
2466
2467         return 0;
2468
2469 cleanup_tx:
2470         bnad_cleanup_tx(bnad, 0);
2471
2472 err_return:
2473         mutex_unlock(&bnad->conf_mutex);
2474         return err;
2475 }
2476
2477 static int
2478 bnad_stop(struct net_device *netdev)
2479 {
2480         struct bnad *bnad = netdev_priv(netdev);
2481         unsigned long flags;
2482
2483         mutex_lock(&bnad->conf_mutex);
2484
2485         /* Stop the stats timer */
2486         bnad_stats_timer_stop(bnad);
2487
2488         init_completion(&bnad->bnad_completions.enet_comp);
2489
2490         spin_lock_irqsave(&bnad->bna_lock, flags);
2491         bna_enet_disable(&bnad->bna.enet, BNA_HARD_CLEANUP,
2492                         bnad_cb_enet_disabled);
2493         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2494
2495         wait_for_completion(&bnad->bnad_completions.enet_comp);
2496
2497         bnad_cleanup_tx(bnad, 0);
2498         bnad_cleanup_rx(bnad, 0);
2499
2500         /* Synchronize mailbox IRQ */
2501         bnad_mbox_irq_sync(bnad);
2502
2503         mutex_unlock(&bnad->conf_mutex);
2504
2505         return 0;
2506 }
2507
2508 /* TX */
2509 /*
2510  * bnad_start_xmit : Netdev entry point for Transmit
2511  *                   Called under lock held by net_device
2512  */
2513 static netdev_tx_t
2514 bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
2515 {
2516         struct bnad *bnad = netdev_priv(netdev);
2517         u32 txq_id = 0;
2518         struct bna_tcb *tcb = bnad->tx_info[0].tcb[txq_id];
2519
2520         u16             txq_prod, vlan_tag = 0;
2521         u32             unmap_prod, wis, wis_used, wi_range;
2522         u32             vectors, vect_id, i, acked;
2523         int                     err;
2524
2525         struct bnad_unmap_q *unmap_q = tcb->unmap_q;
2526         dma_addr_t              dma_addr;
2527         struct bna_txq_entry *txqent;
2528         u16     flags;
2529
2530         if (unlikely
2531             (skb->len <= ETH_HLEN || skb->len > BFI_TX_MAX_DATA_PER_PKT)) {
2532                 dev_kfree_skb(skb);
2533                 return NETDEV_TX_OK;
2534         }
2535
2536         /*
2537          * Takes care of the Tx that is scheduled between clearing the flag
2538          * and the netif_stop_all_queue() call.
2539          */
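        /* Such an skb is simply dropped here. */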
2540         if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) {
2541                 dev_kfree_skb(skb);
2542                 return NETDEV_TX_OK;
2543         }
2544
2545         vectors = 1 + skb_shinfo(skb)->nr_frags;
2546         if (vectors > BFI_TX_MAX_VECTORS_PER_PKT) {
2547                 dev_kfree_skb(skb);
2548                 return NETDEV_TX_OK;
2549         }
2550         wis = BNA_TXQ_WI_NEEDED(vectors);       /* 4 vectors per work item */
2551         acked = 0;
2552         if (unlikely(wis > BNA_QE_FREE_CNT(tcb, tcb->q_depth) ||
2553                         vectors > BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth))) {
2554                 if ((u16) (*tcb->hw_consumer_index) !=
2555                     tcb->consumer_index &&
2556                     !test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
2557                         acked = bnad_free_txbufs(bnad, tcb);
2558                         if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
2559                                 bna_ib_ack(tcb->i_dbell, acked);
2560                         smp_mb__before_clear_bit();
2561                         clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
2562                 } else {
2563                         netif_stop_queue(netdev);
2564                         BNAD_UPDATE_CTR(bnad, netif_queue_stop);
2565                 }
2566
2567                 smp_mb();
2568                 /*
2569                  * Check again to deal with race condition between
2570                  * netif_stop_queue here, and netif_wake_queue in
2571                  * interrupt handler which is not inside netif tx lock.
2572                  */
2573                 if (likely
2574                     (wis > BNA_QE_FREE_CNT(tcb, tcb->q_depth) ||
2575                      vectors > BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth))) {
2576                         BNAD_UPDATE_CTR(bnad, netif_queue_stop);
2577                         return NETDEV_TX_BUSY;
2578                 } else {
2579                         netif_wake_queue(netdev);
2580                         BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
2581                 }
2582         }
2583
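        /*
         * Enough TxQ work items and unmap-queue slots are available now;
         * start building the header work item for this skb.
         */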
2584         unmap_prod = unmap_q->producer_index;
2585         wis_used = 1;
2586         vect_id = 0;
2587         flags = 0;
2588
2589         txq_prod = tcb->producer_index;
2590         BNA_TXQ_QPGE_PTR_GET(txq_prod, tcb->sw_qpt, txqent, wi_range);
2591         BUG_ON(!(wi_range <= tcb->q_depth));
2592         txqent->hdr.wi.reserved = 0;
2593         txqent->hdr.wi.num_vectors = vectors;
2594         txqent->hdr.wi.opcode =
2595                 htons((skb_is_gso(skb) ? BNA_TXQ_WI_SEND_LSO :
2596                        BNA_TXQ_WI_SEND));
2597
2598         if (vlan_tx_tag_present(skb)) {
2599                 vlan_tag = (u16) vlan_tx_tag_get(skb);
2600                 flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
2601         }
2602         if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags)) {
2603                 vlan_tag =
2604                         (tcb->priority & 0x7) << 13 | (vlan_tag & 0x1fff);
2605                 flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
2606         }
2607
2608         txqent->hdr.wi.vlan_tag = htons(vlan_tag);
2609
2610         if (skb_is_gso(skb)) {
2611                 err = bnad_tso_prepare(bnad, skb);
2612                 if (err) {
2613                         dev_kfree_skb(skb);
2614                         return NETDEV_TX_OK;
2615                 }
2616                 txqent->hdr.wi.lso_mss = htons(skb_is_gso(skb));
2617                 flags |= (BNA_TXQ_WI_CF_IP_CKSUM | BNA_TXQ_WI_CF_TCP_CKSUM);
2618                 txqent->hdr.wi.l4_hdr_size_n_offset =
2619                         htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2620                               (tcp_hdrlen(skb) >> 2,
2621                                skb_transport_offset(skb)));
2622         } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
2623                 u8 proto = 0;
2624
2625                 txqent->hdr.wi.lso_mss = 0;
2626
2627                 if (skb->protocol == htons(ETH_P_IP))
2628                         proto = ip_hdr(skb)->protocol;
2629                 else if (skb->protocol == htons(ETH_P_IPV6)) {
2630                         /* nexthdr may not be TCP immediately. */
2631                         proto = ipv6_hdr(skb)->nexthdr;
2632                 }
2633                 if (proto == IPPROTO_TCP) {
2634                         flags |= BNA_TXQ_WI_CF_TCP_CKSUM;
2635                         txqent->hdr.wi.l4_hdr_size_n_offset =
2636                                 htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2637                                       (0, skb_transport_offset(skb)));
2638
2639                         BNAD_UPDATE_CTR(bnad, tcpcsum_offload);
2640
2641                         BUG_ON(!(skb_headlen(skb) >=
2642                                 skb_transport_offset(skb) + tcp_hdrlen(skb)));
2643
2644                 } else if (proto == IPPROTO_UDP) {
2645                         flags |= BNA_TXQ_WI_CF_UDP_CKSUM;
2646                         txqent->hdr.wi.l4_hdr_size_n_offset =
2647                                 htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2648                                       (0, skb_transport_offset(skb)));
2649
2650                         BNAD_UPDATE_CTR(bnad, udpcsum_offload);
2651
2652                         BUG_ON(!(skb_headlen(skb) >=
2653                                    skb_transport_offset(skb) +
2654                                    sizeof(struct udphdr)));
2655                 } else {
2656                         err = skb_checksum_help(skb);
2657                         BNAD_UPDATE_CTR(bnad, csum_help);
2658                         if (err) {
2659                                 dev_kfree_skb(skb);
2660                                 BNAD_UPDATE_CTR(bnad, csum_help_err);
2661                                 return NETDEV_TX_OK;
2662                         }
2663                 }
2664         } else {
2665                 txqent->hdr.wi.lso_mss = 0;
2666                 txqent->hdr.wi.l4_hdr_size_n_offset = 0;
2667         }
2668
2669         txqent->hdr.wi.flags = htons(flags);
2670
2671         txqent->hdr.wi.frame_length = htonl(skb->len);
2672
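        /*
         * Vector 0 maps the linear part of the skb; page fragments are
         * mapped into the following vectors, with extension work items
         * added as needed.
         */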
2673         unmap_q->unmap_array[unmap_prod].skb = skb;
2674         BUG_ON(!(skb_headlen(skb) <= BFI_TX_MAX_DATA_PER_VECTOR));
2675         txqent->vector[vect_id].length = htons(skb_headlen(skb));
2676         dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
2677                                   skb_headlen(skb), DMA_TO_DEVICE);
2678         dma_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
2679                            dma_addr);
2680
2681         BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
2682         BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
2683
2684         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2685                 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
2686                 u16             size = frag->size;
2687
2688                 if (++vect_id == BFI_TX_MAX_VECTORS_PER_WI) {
2689                         vect_id = 0;
2690                         if (--wi_range)
2691                                 txqent++;
2692                         else {
2693                                 BNA_QE_INDX_ADD(txq_prod, wis_used,
2694                                                 tcb->q_depth);
2695                                 wis_used = 0;
2696                                 BNA_TXQ_QPGE_PTR_GET(txq_prod, tcb->sw_qpt,
2697                                                      txqent, wi_range);
2698                                 BUG_ON(!(wi_range <= tcb->q_depth));
2699                         }
2700                         wis_used++;
2701                         txqent->hdr.wi_ext.opcode = htons(BNA_TXQ_WI_EXTENSION);
2702                 }
2703
2704                 BUG_ON(!(size <= BFI_TX_MAX_DATA_PER_VECTOR));
2705                 txqent->vector[vect_id].length = htons(size);
2706                 dma_addr = dma_map_page(&bnad->pcidev->dev, frag->page,
2707                                         frag->page_offset, size, DMA_TO_DEVICE);
2708                 dma_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
2709                                    dma_addr);
2710                 BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
2711                 BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
2712         }
2713
2714         unmap_q->producer_index = unmap_prod;
2715         BNA_QE_INDX_ADD(txq_prod, wis_used, tcb->q_depth);
2716         tcb->producer_index = txq_prod;
2717
2718         smp_mb();
2719
2720         if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
2721                 return NETDEV_TX_OK;
2722
2723         bna_txq_prod_indx_doorbell(tcb);
2724
2725         if ((u16) (*tcb->hw_consumer_index) != tcb->consumer_index)
2726                 tasklet_schedule(&bnad->tx_free_tasklet);
2727
2728         return NETDEV_TX_OK;
2729 }
2730
2731 /*
2732  * Uses spin_lock to synchronize reading of the stats structures, which
2733  * are written by BNA under the same lock.
2734  */
2735 static struct rtnl_link_stats64 *
2736 bnad_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
2737 {
2738         struct bnad *bnad = netdev_priv(netdev);
2739         unsigned long flags;
2740
2741         spin_lock_irqsave(&bnad->bna_lock, flags);
2742
2743         bnad_netdev_qstats_fill(bnad, stats);
2744         bnad_netdev_hwstats_fill(bnad, stats);
2745
2746         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2747
2748         return stats;
2749 }
2750
2751 static void
2752 bnad_set_rx_mode(struct net_device *netdev)
2753 {
2754         struct bnad *bnad = netdev_priv(netdev);
2755         u32     new_mask, valid_mask;
2756         unsigned long flags;
2757
2758         spin_lock_irqsave(&bnad->bna_lock, flags);
2759
2760         new_mask = valid_mask = 0;
2761
2762         if (netdev->flags & IFF_PROMISC) {
2763                 if (!(bnad->cfg_flags & BNAD_CF_PROMISC)) {
2764                         new_mask = BNAD_RXMODE_PROMISC_DEFAULT;
2765                         valid_mask = BNAD_RXMODE_PROMISC_DEFAULT;
2766                         bnad->cfg_flags |= BNAD_CF_PROMISC;
2767                 }
2768         } else {
2769                 if (bnad->cfg_flags & BNAD_CF_PROMISC) {
2770                         new_mask = ~BNAD_RXMODE_PROMISC_DEFAULT;
2771                         valid_mask = BNAD_RXMODE_PROMISC_DEFAULT;
2772                         bnad->cfg_flags &= ~BNAD_CF_PROMISC;
2773                 }
2774         }
2775
2776         if (netdev->flags & IFF_ALLMULTI) {
2777                 if (!(bnad->cfg_flags & BNAD_CF_ALLMULTI)) {
2778                         new_mask |= BNA_RXMODE_ALLMULTI;
2779                         valid_mask |= BNA_RXMODE_ALLMULTI;
2780                         bnad->cfg_flags |= BNAD_CF_ALLMULTI;
2781                 }
2782         } else {
2783                 if (bnad->cfg_flags & BNAD_CF_ALLMULTI) {
2784                         new_mask &= ~BNA_RXMODE_ALLMULTI;
2785                         valid_mask |= BNA_RXMODE_ALLMULTI;
2786                         bnad->cfg_flags &= ~BNAD_CF_ALLMULTI;
2787                 }
2788         }
2789
2790         bna_rx_mode_set(bnad->rx_info[0].rx, new_mask, valid_mask, NULL);
2791
2792         if (!netdev_mc_empty(netdev)) {
2793                 u8 *mcaddr_list;
2794                 int mc_count = netdev_mc_count(netdev);
2795
2796                 /* Index 0 holds the broadcast address */
2797                 mcaddr_list =
2798                         kzalloc((mc_count + 1) * ETH_ALEN,
2799                                 GFP_ATOMIC);
2800                 if (!mcaddr_list)
2801                         goto unlock;
2802
2803                 memcpy(&mcaddr_list[0], &bnad_bcast_addr[0], ETH_ALEN);
2804
2805                 /* Copy rest of the MC addresses */
2806                 bnad_netdev_mc_list_get(netdev, mcaddr_list);
2807
2808                 bna_rx_mcast_listset(bnad->rx_info[0].rx, mc_count + 1,
2809                                         mcaddr_list, NULL);
2810
2811                 /* Should we enable BNAD_CF_ALLMULTI for err != 0 ? */
2812                 kfree(mcaddr_list);
2813         }
2814 unlock:
2815         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2816 }
2817
2818 /*
2819  * bna_lock is used to sync writes to netdev->addr
2820  * conf_lock cannot be used since this call may be made
2821  * in a non-blocking context.
2822  */
2823 static int
2824 bnad_set_mac_address(struct net_device *netdev, void *mac_addr)
2825 {
2826         int err;
2827         struct bnad *bnad = netdev_priv(netdev);
2828         struct sockaddr *sa = (struct sockaddr *)mac_addr;
2829         unsigned long flags;
2830
2831         spin_lock_irqsave(&bnad->bna_lock, flags);
2832
2833         err = bnad_mac_addr_set_locked(bnad, sa->sa_data);
2834
2835         if (!err)
2836                 memcpy(netdev->dev_addr, sa->sa_data, netdev->addr_len);
2837
2838         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2839
2840         return err;
2841 }
2842
2843 static int
2844 bnad_mtu_set(struct bnad *bnad, int mtu)
2845 {
2846         unsigned long flags;
2847
2848         init_completion(&bnad->bnad_completions.mtu_comp);
2849
2850         spin_lock_irqsave(&bnad->bna_lock, flags);
2851         bna_enet_mtu_set(&bnad->bna.enet, mtu, bnad_cb_enet_mtu_set);
2852         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2853
2854         wait_for_completion(&bnad->bnad_completions.mtu_comp);
2855
2856         return bnad->bnad_completions.mtu_comp_status;
2857 }
2858
2859 static int
2860 bnad_change_mtu(struct net_device *netdev, int new_mtu)
2861 {
2862         int err, mtu = netdev->mtu;
2863         struct bnad *bnad = netdev_priv(netdev);
2864
2865         if (new_mtu + ETH_HLEN < ETH_ZLEN || new_mtu > BNAD_JUMBO_MTU)
2866                 return -EINVAL;
2867
2868         mutex_lock(&bnad->conf_mutex);
2869
2870         netdev->mtu = new_mtu;
2871
2872         mtu = ETH_HLEN + VLAN_HLEN + new_mtu + ETH_FCS_LEN;
2873         err = bnad_mtu_set(bnad, mtu);
2874         if (err)
2875                 err = -EBUSY;
2876
2877         mutex_unlock(&bnad->conf_mutex);
2878         return err;
2879 }
2880
2881 static void
2882 bnad_vlan_rx_add_vid(struct net_device *netdev,
2883                                  unsigned short vid)
2884 {
2885         struct bnad *bnad = netdev_priv(netdev);
2886         unsigned long flags;
2887
2888         if (!bnad->rx_info[0].rx)
2889                 return;
2890
2891         mutex_lock(&bnad->conf_mutex);
2892
2893         spin_lock_irqsave(&bnad->bna_lock, flags);
2894         bna_rx_vlan_add(bnad->rx_info[0].rx, vid);
2895         set_bit(vid, bnad->active_vlans);
2896         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2897
2898         mutex_unlock(&bnad->conf_mutex);
2899 }
2900
2901 static void
2902 bnad_vlan_rx_kill_vid(struct net_device *netdev,
2903                                   unsigned short vid)
2904 {
2905         struct bnad *bnad = netdev_priv(netdev);
2906         unsigned long flags;
2907
2908         if (!bnad->rx_info[0].rx)
2909                 return;
2910
2911         mutex_lock(&bnad->conf_mutex);
2912
2913         spin_lock_irqsave(&bnad->bna_lock, flags);
2914         clear_bit(vid, bnad->active_vlans);
2915         bna_rx_vlan_del(bnad->rx_info[0].rx, vid);
2916         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2917
2918         mutex_unlock(&bnad->conf_mutex);
2919 }
2920
2921 #ifdef CONFIG_NET_POLL_CONTROLLER
2922 static void
2923 bnad_netpoll(struct net_device *netdev)
2924 {
2925         struct bnad *bnad = netdev_priv(netdev);
2926         struct bnad_rx_info *rx_info;
2927         struct bnad_rx_ctrl *rx_ctrl;
2928         u32 curr_mask;
2929         int i, j;
2930
2931         if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
2932                 bna_intx_disable(&bnad->bna, curr_mask);
2933                 bnad_isr(bnad->pcidev->irq, netdev);
2934                 bna_intx_enable(&bnad->bna, curr_mask);
2935         } else {
2936                 for (i = 0; i < bnad->num_rx; i++) {
2937                         rx_info = &bnad->rx_info[i];
2938                         if (!rx_info->rx)
2939                                 continue;
2940                         for (j = 0; j < bnad->num_rxp_per_rx; j++) {
2941                                 rx_ctrl = &rx_info->rx_ctrl[j];
2942                                 if (rx_ctrl->ccb) {
2943                                         bnad_disable_rx_irq(bnad,
2944                                                             rx_ctrl->ccb);
2945                                         bnad_netif_rx_schedule_poll(bnad,
2946                                                             rx_ctrl->ccb);
2947                                 }
2948                         }
2949                 }
2950         }
2951 }
2952 #endif
2953
2954 static const struct net_device_ops bnad_netdev_ops = {
2955         .ndo_open               = bnad_open,
2956         .ndo_stop               = bnad_stop,
2957         .ndo_start_xmit         = bnad_start_xmit,
2958         .ndo_get_stats64                = bnad_get_stats64,
2959         .ndo_set_rx_mode        = bnad_set_rx_mode,
2960         .ndo_set_multicast_list = bnad_set_rx_mode,
2961         .ndo_validate_addr      = eth_validate_addr,
2962         .ndo_set_mac_address    = bnad_set_mac_address,
2963         .ndo_change_mtu         = bnad_change_mtu,
2964         .ndo_vlan_rx_add_vid    = bnad_vlan_rx_add_vid,
2965         .ndo_vlan_rx_kill_vid   = bnad_vlan_rx_kill_vid,
2966 #ifdef CONFIG_NET_POLL_CONTROLLER
2967         .ndo_poll_controller    = bnad_netpoll
2968 #endif
2969 };
2970
2971 static void
2972 bnad_netdev_init(struct bnad *bnad, bool using_dac)
2973 {
2974         struct net_device *netdev = bnad->netdev;
2975
2976         netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
2977                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2978                 NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_VLAN_TX;
2979
2980         netdev->vlan_features = NETIF_F_SG | NETIF_F_HIGHDMA |
2981                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2982                 NETIF_F_TSO | NETIF_F_TSO6;
2983
2984         netdev->features |= netdev->hw_features |
2985                 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
2986
2987         if (using_dac)
2988                 netdev->features |= NETIF_F_HIGHDMA;
2989
2990         netdev->mem_start = bnad->mmio_start;
2991         netdev->mem_end = bnad->mmio_start + bnad->mmio_len - 1;
2992
2993         netdev->netdev_ops = &bnad_netdev_ops;
2994         bnad_set_ethtool_ops(netdev);
2995 }
2996
2997 /*
2998  * 1. Initialize the bnad structure
2999  * 2. Setup netdev pointer in pci_dev
3000  * 3. Initialize Tx free tasklet
3001  * 4. Initialize the number of TxQs, CQs & MSIX vectors
3002  */
3003 static int
3004 bnad_init(struct bnad *bnad,
3005           struct pci_dev *pdev, struct net_device *netdev)
3006 {
3007         unsigned long flags;
3008
3009         SET_NETDEV_DEV(netdev, &pdev->dev);
3010         pci_set_drvdata(pdev, netdev);
3011
3012         bnad->netdev = netdev;
3013         bnad->pcidev = pdev;
3014         bnad->mmio_start = pci_resource_start(pdev, 0);
3015         bnad->mmio_len = pci_resource_len(pdev, 0);
3016         bnad->bar0 = ioremap_nocache(bnad->mmio_start, bnad->mmio_len);
3017         if (!bnad->bar0) {
3018                 dev_err(&pdev->dev, "ioremap for bar0 failed\n");
3019                 pci_set_drvdata(pdev, NULL);
3020                 return -ENOMEM;
3021         }
3022         pr_info("bar0 mapped to %p, len %llu\n", bnad->bar0,
3023                (unsigned long long) bnad->mmio_len);
3024
3025         spin_lock_irqsave(&bnad->bna_lock, flags);
3026         if (!bnad_msix_disable)
3027                 bnad->cfg_flags = BNAD_CF_MSIX;
3028
3029         bnad->cfg_flags |= BNAD_CF_DIM_ENABLED;
3030
3031         bnad_q_num_init(bnad);
3032         spin_unlock_irqrestore(&bnad->bna_lock, flags);
3033
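        /*
         * MSI-X vector budget: one vector per TxQ, one per RxP
         * (completion queue), plus the mailbox vector(s).
         */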
3034         bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx) +
3035                 (bnad->num_rx * bnad->num_rxp_per_rx) +
3036                          BNAD_MAILBOX_MSIX_VECTORS;
3037
3038         bnad->txq_depth = BNAD_TXQ_DEPTH;
3039         bnad->rxq_depth = BNAD_RXQ_DEPTH;
3040
3041         bnad->tx_coalescing_timeo = BFI_TX_COALESCING_TIMEO;
3042         bnad->rx_coalescing_timeo = BFI_RX_COALESCING_TIMEO;
3043
3044         tasklet_init(&bnad->tx_free_tasklet, bnad_tx_free_tasklet,
3045                      (unsigned long)bnad);
3046
3047         return 0;
3048 }
3049
3050 /*
3051  * Must be called after bnad_pci_uninit()
3052  * so that iounmap() and pci_set_drvdata(NULL)
3053  * happens only after PCI uninitialization.
3054  */
3055 static void
3056 bnad_uninit(struct bnad *bnad)
3057 {
3058         if (bnad->bar0)
3059                 iounmap(bnad->bar0);
3060         pci_set_drvdata(bnad->pcidev, NULL);
3061 }
3062
3063 /*
3064  * Initialize locks
3065  *      a) Per-ioceth mutex used for serializing configuration
3066  *         changes from the OS interface
3067  *      b) Spinlock used to protect the bna state machine
3068  */
3069 static void
3070 bnad_lock_init(struct bnad *bnad)
3071 {
3072         spin_lock_init(&bnad->bna_lock);
3073         mutex_init(&bnad->conf_mutex);
3074 }
3075
3076 static void
3077 bnad_lock_uninit(struct bnad *bnad)
3078 {
3079         mutex_destroy(&bnad->conf_mutex);
3080 }
3081
3082 /* PCI Initialization */
3083 static int
3084 bnad_pci_init(struct bnad *bnad,
3085               struct pci_dev *pdev, bool *using_dac)
3086 {
3087         int err;
3088
3089         err = pci_enable_device(pdev);
3090         if (err)
3091                 return err;
3092         err = pci_request_regions(pdev, BNAD_NAME);
3093         if (err)
3094                 goto disable_device;
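        /*
         * Prefer a 64-bit DMA mask and fall back to 32-bit if the
         * platform or device cannot support it.  *using_dac is reported
         * back so the caller advertises NETIF_F_HIGHDMA only when
         * 64-bit DMA is in effect.
         */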
3095         if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
3096             !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
3097                 *using_dac = 1;
3098         } else {
3099                 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
3100                 if (err) {
3101                         err = dma_set_coherent_mask(&pdev->dev,
3102                                                     DMA_BIT_MASK(32));
3103                         if (err)
3104                                 goto release_regions;
3105                 }
3106                 *using_dac = 0;
3107         }
3108         pci_set_master(pdev);
3109         return 0;
3110
3111 release_regions:
3112         pci_release_regions(pdev);
3113 disable_device:
3114         pci_disable_device(pdev);
3115
3116         return err;
3117 }
3118
3119 static void
3120 bnad_pci_uninit(struct pci_dev *pdev)
3121 {
3122         pci_release_regions(pdev);
3123         pci_disable_device(pdev);
3124 }
3125
3126 static int __devinit
3127 bnad_pci_probe(struct pci_dev *pdev,
3128                 const struct pci_device_id *pcidev_id)
3129 {
3130         bool    using_dac = false;
3131         int     err;
3132         struct bnad *bnad;
3133         struct bna *bna;
3134         struct net_device *netdev;
3135         struct bfa_pcidev pcidev_info;
3136         unsigned long flags;
3137
3138         pr_info("bnad_pci_probe : (0x%p, 0x%p) PCI Func : (%d)\n",
3139                pdev, pcidev_id, PCI_FUNC(pdev->devfn));
3140
3141         mutex_lock(&bnad_fwimg_mutex);
3142         if (!cna_get_firmware_buf(pdev)) {
3143                 mutex_unlock(&bnad_fwimg_mutex);
3144                 pr_warn("Failed to load Firmware Image!\n");
3145                 return -ENODEV;
3146         }
3147         mutex_unlock(&bnad_fwimg_mutex);
3148
3149         /*
3150          * Allocates sizeof(struct net_device) + sizeof(struct bnad);
3151          * bnad = netdev_priv(netdev)
3152          */
3153         netdev = alloc_etherdev(sizeof(struct bnad));
3154         if (!netdev) {
3155                 dev_err(&pdev->dev, "netdev allocation failed\n");
3156                 err = -ENOMEM;
3157                 return err;
3158         }
3159         bnad = netdev_priv(netdev);
3160
3161         bnad_lock_init(bnad);
3162
3163         mutex_lock(&bnad->conf_mutex);
3164         /*
3165          * PCI initialization
3166          *      Output : using_dac = 1 for 64 bit DMA
3167          *                         = 0 for 32 bit DMA
3168          */
3169         err = bnad_pci_init(bnad, pdev, &using_dac);
3170         if (err)
3171                 goto free_netdev;
3172
3173         /*
3174          * Initialize bnad structure
3175          * Setup relation between pci_dev & netdev
3176          * Init Tx free tasklet
3177          */
3178         err = bnad_init(bnad, pdev, netdev);
3179         if (err)
3180                 goto pci_uninit;
3181
3182         /* Initialize netdev structure, set up ethtool ops */
3183         bnad_netdev_init(bnad, using_dac);
3184
3185         /* Set link to down state */
3186         netif_carrier_off(netdev);
3187
3188         /* Get resource requirement from bna */
3189         spin_lock_irqsave(&bnad->bna_lock, flags);
3190         bna_res_req(&bnad->res_info[0]);
3191         spin_unlock_irqrestore(&bnad->bna_lock, flags);
3192
3193         /* Allocate resources from bna */
3194         err = bnad_res_alloc(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
3195         if (err)
3196                 goto drv_uninit;
3197
3198         bna = &bnad->bna;
3199
3200         /* Setup pcidev_info for bna_init() */
3201         pcidev_info.pci_slot = PCI_SLOT(bnad->pcidev->devfn);
3202         pcidev_info.pci_func = PCI_FUNC(bnad->pcidev->devfn);
3203         pcidev_info.device_id = bnad->pcidev->device;
3204         pcidev_info.pci_bar_kva = bnad->bar0;
3205
3206         spin_lock_irqsave(&bnad->bna_lock, flags);
3207         bna_init(bna, bnad, &pcidev_info, &bnad->res_info[0]);
3208         spin_unlock_irqrestore(&bnad->bna_lock, flags);
3209
3210         bnad->stats.bna_stats = &bna->stats;
3211
3212         bnad_enable_msix(bnad);
3213         err = bnad_mbox_irq_alloc(bnad);
3214         if (err)
3215                 goto res_free;
3216
3217
3218         /* Set up timers */
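        /*
         * Four driver-owned timers feed the IOC layer: the IOC state
         * machine timeout, the firmware heartbeat check, the IOCPF
         * state machine timeout and the IOCPF semaphore timeout (see
         * the bnad_ioc_* / bnad_iocpf_* handlers wired up below).
         */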
3219         setup_timer(&bnad->bna.ioceth.ioc.ioc_timer, bnad_ioc_timeout,
3220                                 ((unsigned long)bnad));
3221         setup_timer(&bnad->bna.ioceth.ioc.hb_timer, bnad_ioc_hb_check,
3222                                 ((unsigned long)bnad));
3223         setup_timer(&bnad->bna.ioceth.ioc.iocpf_timer, bnad_iocpf_timeout,
3224                                 ((unsigned long)bnad));
3225         setup_timer(&bnad->bna.ioceth.ioc.sem_timer, bnad_iocpf_sem_timeout,
3226                                 ((unsigned long)bnad));
3227
3228         /* Now start the timer before calling IOC */
3229         mod_timer(&bnad->bna.ioceth.ioc.iocpf_timer,
3230                   jiffies + msecs_to_jiffies(BNA_IOC_TIMER_FREQ));
3231
3232         /*
3233          * Start the chip.
3234          * If the enable callback returns an error, log it and skip
3235          * the remaining setup via the probe_success label below.
3236          */
3237         err = bnad_ioceth_enable(bnad);
3238         if (err) {
3239                 pr_err("BNA: Initialization failed err=%d\n",
3240                        err);
3241                 goto probe_success;
3242         }
3243
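        /*
         * Check the requested TxQ/RxP counts against what the ASIC
         * reports; if they cannot be met, scale the driver's queue
         * configuration down and retry before giving up with -EIO.
         */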
3244         spin_lock_irqsave(&bnad->bna_lock, flags);
3245         if (bna_num_txq_set(bna, BNAD_NUM_TXQ + 1) ||
3246                 bna_num_rxp_set(bna, BNAD_NUM_RXP + 1)) {
3247                 bnad_q_num_adjust(bnad, bna_attr(bna)->num_txq - 1,
3248                         bna_attr(bna)->num_rxp - 1);
3249                 if (bna_num_txq_set(bna, BNAD_NUM_TXQ + 1) ||
3250                         bna_num_rxp_set(bna, BNAD_NUM_RXP + 1))
3251                         err = -EIO;
3252         }
3253         bna_mod_res_req(&bnad->bna, &bnad->mod_res_info[0]);
3254         spin_unlock_irqrestore(&bnad->bna_lock, flags);
3255
3256         err = bnad_res_alloc(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
3257         if (err)
3258                 goto disable_ioceth;
3259
3260         spin_lock_irqsave(&bnad->bna_lock, flags);
3261         bna_mod_init(&bnad->bna, &bnad->mod_res_info[0]);
3262         spin_unlock_irqrestore(&bnad->bna_lock, flags);
3263
3264         /* Get the burnt-in mac */
3265         spin_lock_irqsave(&bnad->bna_lock, flags);
3266         bna_enet_perm_mac_get(&bna->enet, &bnad->perm_addr);
3267         bnad_set_netdev_perm_addr(bnad);
3268         spin_unlock_irqrestore(&bnad->bna_lock, flags);
3269
3270         /* Finally, register with the net_device layer */
3271         err = register_netdev(netdev);
3272         if (err) {
3273                 pr_err("BNA : Registering with netdev failed\n");
3274                 goto probe_uninit;
3275         }
3276         set_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags);
3277
3278 probe_success:
3279         mutex_unlock(&bnad->conf_mutex);
3280         return 0;
3281
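/*
 * Error unwinding: each label below undoes only what was successfully
 * set up before the corresponding failure point, in reverse order of
 * acquisition.
 */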
3282 probe_uninit:
3283         bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
3284 disable_ioceth:
3285         bnad_ioceth_disable(bnad);
3286         del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer);
3287         del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer);
3288         del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer);
3289         spin_lock_irqsave(&bnad->bna_lock, flags);
3290         bna_uninit(bna);
3291         spin_unlock_irqrestore(&bnad->bna_lock, flags);
3292         bnad_mbox_irq_free(bnad);
3293         bnad_disable_msix(bnad);
3294 res_free:
3295         bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
3296 drv_uninit:
3297         bnad_uninit(bnad);
3298 pci_uninit:
3299         bnad_pci_uninit(pdev);
3300         mutex_unlock(&bnad->conf_mutex);
3301         bnad_lock_uninit(bnad);
3302 free_netdev:
3303         free_netdev(netdev);
3304         return err;
3305 }
3306
3307 static void __devexit
3308 bnad_pci_remove(struct pci_dev *pdev)
3309 {
3310         struct net_device *netdev = pci_get_drvdata(pdev);
3311         struct bnad *bnad;
3312         struct bna *bna;
3313         unsigned long flags;
3314
3315         if (!netdev)
3316                 return;
3317
3318         pr_info("%s bnad_pci_remove\n", netdev->name);
3319         bnad = netdev_priv(netdev);
3320         bna = &bnad->bna;
3321
3322         if (test_and_clear_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags))
3323                 unregister_netdev(netdev);
3324
3325         mutex_lock(&bnad->conf_mutex);
3326         bnad_ioceth_disable(bnad);
3327         del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer);
3328         del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer);
3329         del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer);
3330         spin_lock_irqsave(&bnad->bna_lock, flags);
3331         bna_uninit(bna);
3332         spin_unlock_irqrestore(&bnad->bna_lock, flags);
3333
3334         bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
3335         bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
3336         bnad_mbox_irq_free(bnad);
3337         bnad_disable_msix(bnad);
3338         bnad_pci_uninit(pdev);
3339         mutex_unlock(&bnad->conf_mutex);
3340         bnad_lock_uninit(bnad);
3341         bnad_uninit(bnad);
3342         free_netdev(netdev);
3343 }
3344
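/*
 * PCI IDs this driver binds to.  The class/class_mask entry restricts
 * the match to the Ethernet (network-class) function of the Brocade CT
 * converged adapter.
 */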
3345 static DEFINE_PCI_DEVICE_TABLE(bnad_pci_id_table) = {
3346         {
3347                 PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
3348                         PCI_DEVICE_ID_BROCADE_CT),
3349                 .class = PCI_CLASS_NETWORK_ETHERNET << 8,
3350                 .class_mask =  0xffff00
3351         }, {0,  }
3352 };
3353
3354 MODULE_DEVICE_TABLE(pci, bnad_pci_id_table);
3355
3356 static struct pci_driver bnad_pci_driver = {
3357         .name = BNAD_NAME,
3358         .id_table = bnad_pci_id_table,
3359         .probe = bnad_pci_probe,
3360         .remove = __devexit_p(bnad_pci_remove),
3361 };
3362
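/*
 * Module init: propagate the ioc_auto_recover module parameter to the
 * IOC layer, then register the PCI driver.
 */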
3363 static int __init
3364 bnad_module_init(void)
3365 {
3366         int err;
3367
3368         pr_info("Brocade 10G Ethernet driver - version: %s\n",
3369                         BNAD_VERSION);
3370
3371         bfa_nw_ioc_auto_recover(bnad_ioc_auto_recover);
3372
3373         err = pci_register_driver(&bnad_pci_driver);
3374         if (err < 0) {
3375                 pr_err("bna : PCI registration failed in module init "
3376                        "(%d)\n", err);
3377                 return err;
3378         }
3379
3380         return 0;
3381 }
3382
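/*
 * Module exit: unregister the PCI driver and release the firmware
 * image (bfi_fw) acquired via cna_get_firmware_buf() at probe time.
 */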
3383 static void __exit
3384 bnad_module_exit(void)
3385 {
3386         pci_unregister_driver(&bnad_pci_driver);
3387
3388         if (bfi_fw)
3389                 release_firmware(bfi_fw);
3390 }
3391
3392 module_init(bnad_module_init);
3393 module_exit(bnad_module_exit);
3394
3395 MODULE_AUTHOR("Brocade");
3396 MODULE_LICENSE("GPL");
3397 MODULE_DESCRIPTION("Brocade 10G PCIe Ethernet driver");
3398 MODULE_VERSION(BNAD_VERSION);
3399 MODULE_FIRMWARE(CNA_FW_FILE_CT);