1 /* bnx2x_cmn.c: Broadcom Everest network driver.
2  *
3  * Copyright (c) 2007-2011 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10  * Written by: Eliezer Tamir
11  * Based on code from Michael Chan's bnx2 driver
12  * UDP CSUM errata workaround by Arik Gendelman
13  * Slowpath and fastpath rework by Vladislav Zolotarov
14  * Statistics and Link management by Yitchak Gertner
15  *
16  */
17
18 #include <linux/etherdevice.h>
19 #include <linux/if_vlan.h>
20 #include <linux/ip.h>
21 #include <net/ipv6.h>
22 #include <net/ip6_checksum.h>
23 #include <linux/firmware.h>
24 #include "bnx2x_cmn.h"
25
26 #include "bnx2x_init.h"
27
28 static int bnx2x_setup_irqs(struct bnx2x *bp);
29
30 /**
31  * bnx2x_bz_fp - zero content of the fastpath structure.
32  *
33  * @bp:         driver handle
34  * @index:      fastpath index to be zeroed
35  *
36  * Makes sure the contents of bp->fp[index].napi are kept
37  * intact.
38  */
39 static inline void bnx2x_bz_fp(struct bnx2x *bp, int index)
40 {
41         struct bnx2x_fastpath *fp = &bp->fp[index];
42         struct napi_struct orig_napi = fp->napi;
43         /* bzero bnx2x_fastpath contents */
44         memset(fp, 0, sizeof(*fp));
45
46         /* Restore the NAPI object as it has been already initialized */
47         fp->napi = orig_napi;
48 }
49
50 /**
51  * bnx2x_move_fp - move content of the fastpath structure.
52  *
53  * @bp:         driver handle
54  * @from:       source FP index
55  * @to:         destination FP index
56  *
57  * Makes sure the contents of bp->fp[to].napi are kept
58  * intact.
59  */
60 static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
61 {
62         struct bnx2x_fastpath *from_fp = &bp->fp[from];
63         struct bnx2x_fastpath *to_fp = &bp->fp[to];
64         struct napi_struct orig_napi = to_fp->napi;
65         /* Move bnx2x_fastpath contents */
66         memcpy(to_fp, from_fp, sizeof(*to_fp));
67         to_fp->index = to;
68
69         /* Restore the NAPI object as it has been already initialized */
70         to_fp->napi = orig_napi;
71 }
72
73 /* free skb in the packet ring at pos idx
74  * return idx of last bd freed
75  */
76 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
77                              u16 idx)
78 {
79         struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
80         struct eth_tx_start_bd *tx_start_bd;
81         struct eth_tx_bd *tx_data_bd;
82         struct sk_buff *skb = tx_buf->skb;
83         u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
84         int nbd;
85
86         /* prefetch skb end pointer to speed up dev_kfree_skb() */
87         prefetch(&skb->end);
88
89         DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
90            idx, tx_buf, skb);
91
92         /* unmap first bd */
93         DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
94         tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
95         dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
96                          BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);
97
98         nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
99 #ifdef BNX2X_STOP_ON_ERROR
100         if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
101                 BNX2X_ERR("BAD nbd!\n");
102                 bnx2x_panic();
103         }
104 #endif
105         new_cons = nbd + tx_buf->first_bd;
106
107         /* Get the next bd */
108         bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
109
110         /* Skip a parse bd... */
111         --nbd;
112         bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
113
114         /* ...and the TSO split header bd since they have no mapping */
115         if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
116                 --nbd;
117                 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
118         }
119
120         /* now free frags */
121         while (nbd > 0) {
122
123                 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
124                 tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
125                 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
126                                BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
127                 if (--nbd)
128                         bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
129         }
130
131         /* release skb */
132         WARN_ON(!skb);
133         dev_kfree_skb(skb);
134         tx_buf->first_bd = 0;
135         tx_buf->skb = NULL;
136
137         return new_cons;
138 }
139
140 int bnx2x_tx_int(struct bnx2x_fastpath *fp)
141 {
142         struct bnx2x *bp = fp->bp;
143         struct netdev_queue *txq;
144         u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
145
146 #ifdef BNX2X_STOP_ON_ERROR
147         if (unlikely(bp->panic))
148                 return -1;
149 #endif
150
151         txq = netdev_get_tx_queue(bp->dev, fp->index);
152         hw_cons = le16_to_cpu(*fp->tx_cons_sb);
153         sw_cons = fp->tx_pkt_cons;
154
155         while (sw_cons != hw_cons) {
156                 u16 pkt_cons;
157
158                 pkt_cons = TX_BD(sw_cons);
159
160                 DP(NETIF_MSG_TX_DONE, "queue[%d]: hw_cons %u  sw_cons %u "
161                                       " pkt_cons %u\n",
162                    fp->index, hw_cons, sw_cons, pkt_cons);
163
164                 bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
165                 sw_cons++;
166         }
167
168         fp->tx_pkt_cons = sw_cons;
169         fp->tx_bd_cons = bd_cons;
170
171         /* Need to make the tx_bd_cons update visible to start_xmit()
172          * before checking for netif_tx_queue_stopped().  Without the
173          * memory barrier, there is a small possibility that
174          * start_xmit() will miss it and cause the queue to be stopped
175          * forever.
176          */
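        /* (This is expected to pair with the memory barrier issued by
         * bnx2x_start_xmit() right after it stops the queue.)
         */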
177         smp_mb();
178
179         if (unlikely(netif_tx_queue_stopped(txq))) {
180                 /* Taking the tx_lock() is needed to prevent re-enabling the
181                  * queue while it's empty. This could happen if rx_action() gets
182                  * suspended in bnx2x_tx_int() after the condition before
183                  * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
184                  *
185                  * stops the queue->sees fresh tx_bd_cons->releases the queue->
186                  * sends some packets consuming the whole queue again->
187                  * stops the queue
188                  */
189
190                 __netif_tx_lock(txq, smp_processor_id());
191
192                 if ((netif_tx_queue_stopped(txq)) &&
193                     (bp->state == BNX2X_STATE_OPEN) &&
194                     (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
195                         netif_tx_wake_queue(txq);
196
197                 __netif_tx_unlock(txq);
198         }
199         return 0;
200 }
201
202 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
203                                              u16 idx)
204 {
205         u16 last_max = fp->last_max_sge;
206
207         if (SUB_S16(idx, last_max) > 0)
208                 fp->last_max_sge = idx;
209 }
210
211 static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
212                                   struct eth_fast_path_rx_cqe *fp_cqe)
213 {
214         struct bnx2x *bp = fp->bp;
215         u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
216                                      le16_to_cpu(fp_cqe->len_on_bd)) >>
217                       SGE_PAGE_SHIFT;
218         u16 last_max, last_elem, first_elem;
219         u16 delta = 0;
220         u16 i;
221
222         if (!sge_len)
223                 return;
224
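        /* sge_len is the number of SGE pages holding the part of the
         * aggregated packet that did not fit in the BD buffer
         * (pkt_len - len_on_bd), rounded up to a whole page.
         */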
225         /* First mark all used pages */
226         for (i = 0; i < sge_len; i++)
227                 SGE_MASK_CLEAR_BIT(fp,
228                         RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[i])));
229
230         DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
231            sge_len - 1, le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));
232
233         /* Here we assume that the last SGE index is the biggest */
234         prefetch((void *)(fp->sge_mask));
235         bnx2x_update_last_max_sge(fp,
236                 le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));
237
238         last_max = RX_SGE(fp->last_max_sge);
239         last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
240         first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;
241
242         /* If ring is not full */
243         if (last_elem + 1 != first_elem)
244                 last_elem++;
245
246         /* Now update the prod */
247         for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
248                 if (likely(fp->sge_mask[i]))
249                         break;
250
251                 fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
252                 delta += RX_SGE_MASK_ELEM_SZ;
253         }
254
255         if (delta > 0) {
256                 fp->rx_sge_prod += delta;
257                 /* clear page-end entries */
258                 bnx2x_clear_sge_mask_next_elems(fp);
259         }
260
261         DP(NETIF_MSG_RX_STATUS,
262            "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
263            fp->last_max_sge, fp->rx_sge_prod);
264 }
265
266 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
267                             struct sk_buff *skb, u16 cons, u16 prod)
268 {
269         struct bnx2x *bp = fp->bp;
270         struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
271         struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
272         struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
273         dma_addr_t mapping;
274
275         /* move empty skb from pool to prod and map it */
276         prod_rx_buf->skb = fp->tpa_pool[queue].skb;
277         mapping = dma_map_single(&bp->pdev->dev, fp->tpa_pool[queue].skb->data,
278                                  fp->rx_buf_size, DMA_FROM_DEVICE);
279         dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
280
281         /* move partial skb from cons to pool (don't unmap yet) */
282         fp->tpa_pool[queue] = *cons_rx_buf;
283
284         /* mark bin state as start - print error if current state != stop */
285         if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
286                 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
287
288         fp->tpa_state[queue] = BNX2X_TPA_START;
289
290         /* point prod_bd to new skb */
291         prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
292         prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
293
294 #ifdef BNX2X_STOP_ON_ERROR
295         fp->tpa_queue_used |= (1 << queue);
296 #ifdef _ASM_GENERIC_INT_L64_H
297         DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
298 #else
299         DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
300 #endif
301            fp->tpa_queue_used);
302 #endif
303 }
304
305 /* Timestamp option length allowed for TPA aggregation:
306  *
307  *              nop nop kind length echo val
308  */
309 #define TPA_TSTAMP_OPT_LEN      12
310 /**
311  * bnx2x_set_lro_mss - calculate the approximate value of the MSS
312  *
313  * @bp:                 driver handle
314  * @parsing_flags:      parsing flags from the START CQE
315  * @len_on_bd:          total length of the first packet for the
316  *                      aggregation.
317  *
318  * Returns the approximate MSS for this aggregation, calculated from
319  * its first packet.
320  */
321 static inline u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags,
322                                     u16 len_on_bd)
323 {
324         /* A TPA aggregation won't have IP options, nor TCP options
325          * other than the timestamp.
326          */
327         u16 hdrs_len = ETH_HLEN + sizeof(struct iphdr) + sizeof(struct tcphdr);
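        /* For example, on a typical build this is 14 (ETH_HLEN) +
         * 20 (iphdr) + 20 (tcphdr) = 54 bytes, or 66 bytes once the
         * timestamp option handled below is accounted for.
         */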
328
329
330         /* Check if there was a TCP timestamp; if there is, it will
331          * always be 12 bytes long: nop nop kind length echo val.
332          *
333          * Otherwise FW would close the aggregation.
334          */
335         if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
336                 hdrs_len += TPA_TSTAMP_OPT_LEN;
337
338         return len_on_bd - hdrs_len;
339 }
340
341 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
342                                struct sk_buff *skb,
343                                struct eth_fast_path_rx_cqe *fp_cqe,
344                                u16 cqe_idx, u16 parsing_flags)
345 {
346         struct sw_rx_page *rx_pg, old_rx_pg;
347         u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
348         u32 i, frag_len, frag_size, pages;
349         int err;
350         int j;
351
352         frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
353         pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
354
355         /* This is needed in order to enable forwarding support */
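        /* gso_size lets the stack re-segment the aggregated frame at
         * (approximately) the original MSS if it has to be forwarded.
         */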
356         if (frag_size)
357                 skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp, parsing_flags,
358                                                               len_on_bd);
359
360 #ifdef BNX2X_STOP_ON_ERROR
361         if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
362                 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
363                           pages, cqe_idx);
364                 BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
365                           fp_cqe->pkt_len, len_on_bd);
366                 bnx2x_panic();
367                 return -EINVAL;
368         }
369 #endif
370
371         /* Run through the SGL and compose the fragmented skb */
372         for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
373                 u16 sge_idx =
374                         RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[j]));
375
376                 /* FW gives the indices of the SGE as if the ring were an array
377                    (meaning that the "next" element consumes 2 indices) */
378                 frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
379                 rx_pg = &fp->rx_page_ring[sge_idx];
380                 old_rx_pg = *rx_pg;
381
382                 /* If we fail to allocate a substitute page, we simply stop
383                    where we are and drop the whole packet */
384                 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
385                 if (unlikely(err)) {
386                         fp->eth_q_stats.rx_skb_alloc_failed++;
387                         return err;
388                 }
389
390                 /* Unmap the page as we are going to pass it to the stack */
391                 dma_unmap_page(&bp->pdev->dev,
392                                dma_unmap_addr(&old_rx_pg, mapping),
393                                SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
394
395                 /* Add one frag and update the appropriate fields in the skb */
396                 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
397
398                 skb->data_len += frag_len;
399                 skb->truesize += frag_len;
400                 skb->len += frag_len;
401
402                 frag_size -= frag_len;
403         }
404
405         return 0;
406 }
407
408 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
409                            u16 queue, int pad, int len, union eth_rx_cqe *cqe,
410                            u16 cqe_idx)
411 {
412         struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
413         struct sk_buff *skb = rx_buf->skb;
414         /* alloc new skb */
415         struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, fp->rx_buf_size);
416
417         /* Unmap skb in the pool anyway, as we are going to change
418            pool entry status to BNX2X_TPA_STOP even if new skb allocation
419            fails. */
420         dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
421                          fp->rx_buf_size, DMA_FROM_DEVICE);
422
423         if (likely(new_skb)) {
424                 /* fix ip xsum and give it to the stack */
425                 /* (no need to map the new skb) */
426                 u16 parsing_flags =
427                         le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags);
428
429                 prefetch(skb);
430                 prefetch(((char *)(skb)) + L1_CACHE_BYTES);
431
432 #ifdef BNX2X_STOP_ON_ERROR
433                 if (pad + len > fp->rx_buf_size) {
434                         BNX2X_ERR("skb_put is about to fail...  "
435                                   "pad %d  len %d  rx_buf_size %d\n",
436                                   pad, len, fp->rx_buf_size);
437                         bnx2x_panic();
438                         return;
439                 }
440 #endif
441
442                 skb_reserve(skb, pad);
443                 skb_put(skb, len);
444
445                 skb->protocol = eth_type_trans(skb, bp->dev);
446                 skb->ip_summed = CHECKSUM_UNNECESSARY;
447
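                /* The aggregated frame's IP header (its total length) no
                 * longer matches the checksum computed for the original
                 * first packet, so recompute it before handing the skb up.
                 */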
448                 {
449                         struct iphdr *iph;
450
451                         iph = (struct iphdr *)skb->data;
452                         iph->check = 0;
453                         iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
454                 }
455
456                 if (!bnx2x_fill_frag_skb(bp, fp, skb,
457                                          &cqe->fast_path_cqe, cqe_idx,
458                                          parsing_flags)) {
459                         if (parsing_flags & PARSING_FLAGS_VLAN)
460                                 __vlan_hwaccel_put_tag(skb,
461                                                  le16_to_cpu(cqe->fast_path_cqe.
462                                                              vlan_tag));
463                         napi_gro_receive(&fp->napi, skb);
464                 } else {
465                         DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
466                            " - dropping packet!\n");
467                         dev_kfree_skb(skb);
468                 }
469
470
471                 /* put new skb in bin */
472                 fp->tpa_pool[queue].skb = new_skb;
473
474         } else {
475                 /* else drop the packet and keep the buffer in the bin */
476                 DP(NETIF_MSG_RX_STATUS,
477                    "Failed to allocate new skb - dropping packet!\n");
478                 fp->eth_q_stats.rx_skb_alloc_failed++;
479         }
480
481         fp->tpa_state[queue] = BNX2X_TPA_STOP;
482 }
483
484 /* Set Toeplitz hash value in the skb using the value from the
485  * CQE (calculated by HW).
486  */
487 static inline void bnx2x_set_skb_rxhash(struct bnx2x *bp, union eth_rx_cqe *cqe,
488                                         struct sk_buff *skb)
489 {
490         /* Set Toeplitz hash from CQE */
491         if ((bp->dev->features & NETIF_F_RXHASH) &&
492             (cqe->fast_path_cqe.status_flags &
493              ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
494                 skb->rxhash =
495                 le32_to_cpu(cqe->fast_path_cqe.rss_hash_result);
496 }
497
498 int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
499 {
500         struct bnx2x *bp = fp->bp;
501         u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
502         u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
503         int rx_pkt = 0;
504
505 #ifdef BNX2X_STOP_ON_ERROR
506         if (unlikely(bp->panic))
507                 return 0;
508 #endif
509
510         /* The CQ "next page" element is the same size as a regular element,
511            so it is safe to simply skip over it when the consumer hits it */
512         hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
513         if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
514                 hw_comp_cons++;
515
516         bd_cons = fp->rx_bd_cons;
517         bd_prod = fp->rx_bd_prod;
518         bd_prod_fw = bd_prod;
519         sw_comp_cons = fp->rx_comp_cons;
520         sw_comp_prod = fp->rx_comp_prod;
521
522         /* Memory barrier necessary as speculative reads of the rx
523          * buffer can be ahead of the index in the status block
524          */
525         rmb();
526
527         DP(NETIF_MSG_RX_STATUS,
528            "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
529            fp->index, hw_comp_cons, sw_comp_cons);
530
531         while (sw_comp_cons != hw_comp_cons) {
532                 struct sw_rx_bd *rx_buf = NULL;
533                 struct sk_buff *skb;
534                 union eth_rx_cqe *cqe;
535                 u8 cqe_fp_flags;
536                 u16 len, pad;
537
538                 comp_ring_cons = RCQ_BD(sw_comp_cons);
539                 bd_prod = RX_BD(bd_prod);
540                 bd_cons = RX_BD(bd_cons);
541
542                 /* Prefetch the page containing the BD descriptor
543                    at the producer's index. It will be needed when a new
544                    skb is allocated */
545                 prefetch((void *)(PAGE_ALIGN((unsigned long)
546                                              (&fp->rx_desc_ring[bd_prod])) -
547                                   PAGE_SIZE + 1));
548
549                 cqe = &fp->rx_comp_ring[comp_ring_cons];
550                 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
551
552                 DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
553                    "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
554                    cqe_fp_flags, cqe->fast_path_cqe.status_flags,
555                    le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
556                    le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
557                    le16_to_cpu(cqe->fast_path_cqe.pkt_len));
558
559                 /* is this a slowpath msg? */
560                 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
561                         bnx2x_sp_event(fp, cqe);
562                         goto next_cqe;
563
564                 /* this is an rx packet */
565                 } else {
566                         rx_buf = &fp->rx_buf_ring[bd_cons];
567                         skb = rx_buf->skb;
568                         prefetch(skb);
569                         len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
570                         pad = cqe->fast_path_cqe.placement_offset;
571
572                         /* - If CQE is marked both TPA_START and TPA_END it is
573                          *   a non-TPA CQE.
574                          * - FP CQE will always have either TPA_START or/and
575                          *   TPA_STOP flags set.
576                          */
577                         if ((!fp->disable_tpa) &&
578                             (TPA_TYPE(cqe_fp_flags) !=
579                                         (TPA_TYPE_START | TPA_TYPE_END))) {
580                                 u16 queue = cqe->fast_path_cqe.queue_index;
581
582                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
583                                         DP(NETIF_MSG_RX_STATUS,
584                                            "calling tpa_start on queue %d\n",
585                                            queue);
586
587                                         bnx2x_tpa_start(fp, queue, skb,
588                                                         bd_cons, bd_prod);
589
590                                         /* Set Toeplitz hash for an LRO skb */
591                                         bnx2x_set_skb_rxhash(bp, cqe, skb);
592
593                                         goto next_rx;
594                                 } else { /* TPA_STOP */
595                                         DP(NETIF_MSG_RX_STATUS,
596                                            "calling tpa_stop on queue %d\n",
597                                            queue);
598
599                                         if (!BNX2X_RX_SUM_FIX(cqe))
600                                                 BNX2X_ERR("STOP on non-TCP "
601                                                           "data\n");
602
603                                         /* This is the size of the linear data
604                                            on this skb */
605                                         len = le16_to_cpu(cqe->fast_path_cqe.
606                                                                 len_on_bd);
607                                         bnx2x_tpa_stop(bp, fp, queue, pad,
608                                                     len, cqe, comp_ring_cons);
609 #ifdef BNX2X_STOP_ON_ERROR
610                                         if (bp->panic)
611                                                 return 0;
612 #endif
613
614                                         bnx2x_update_sge_prod(fp,
615                                                         &cqe->fast_path_cqe);
616                                         goto next_cqe;
617                                 }
618                         }
619
620                         dma_sync_single_for_device(&bp->pdev->dev,
621                                         dma_unmap_addr(rx_buf, mapping),
622                                                    pad + RX_COPY_THRESH,
623                                                    DMA_FROM_DEVICE);
624                         prefetch(((char *)(skb)) + L1_CACHE_BYTES);
625
626                         /* is this an error packet? */
627                         if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
628                                 DP(NETIF_MSG_RX_ERR,
629                                    "ERROR  flags %x  rx packet %u\n",
630                                    cqe_fp_flags, sw_comp_cons);
631                                 fp->eth_q_stats.rx_err_discard_pkt++;
632                                 goto reuse_rx;
633                         }
634
635                         /* Since we don't have a jumbo ring
636                          * copy small packets if mtu > 1500
637                          */
638                         if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
639                             (len <= RX_COPY_THRESH)) {
640                                 struct sk_buff *new_skb;
641
642                                 new_skb = netdev_alloc_skb(bp->dev,
643                                                            len + pad);
644                                 if (new_skb == NULL) {
645                                         DP(NETIF_MSG_RX_ERR,
646                                            "ERROR  packet dropped "
647                                            "because of alloc failure\n");
648                                         fp->eth_q_stats.rx_skb_alloc_failed++;
649                                         goto reuse_rx;
650                                 }
651
652                                 /* aligned copy */
653                                 skb_copy_from_linear_data_offset(skb, pad,
654                                                     new_skb->data + pad, len);
655                                 skb_reserve(new_skb, pad);
656                                 skb_put(new_skb, len);
657
658                                 bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod);
659
660                                 skb = new_skb;
661
662                         } else
663                         if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
664                                 dma_unmap_single(&bp->pdev->dev,
665                                         dma_unmap_addr(rx_buf, mapping),
666                                                  fp->rx_buf_size,
667                                                  DMA_FROM_DEVICE);
668                                 skb_reserve(skb, pad);
669                                 skb_put(skb, len);
670
671                         } else {
672                                 DP(NETIF_MSG_RX_ERR,
673                                    "ERROR  packet dropped because "
674                                    "of alloc failure\n");
675                                 fp->eth_q_stats.rx_skb_alloc_failed++;
676 reuse_rx:
677                                 bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod);
678                                 goto next_rx;
679                         }
680
681                         skb->protocol = eth_type_trans(skb, bp->dev);
682
683                         /* Set Toeplitz hash for a non-LRO skb */
684                         bnx2x_set_skb_rxhash(bp, cqe, skb);
685
686                         skb_checksum_none_assert(skb);
687
688                         if (bp->dev->features & NETIF_F_RXCSUM) {
689                                 if (likely(BNX2X_RX_CSUM_OK(cqe)))
690                                         skb->ip_summed = CHECKSUM_UNNECESSARY;
691                                 else
692                                         fp->eth_q_stats.hw_csum_err++;
693                         }
694                 }
695
696                 skb_record_rx_queue(skb, fp->index);
697
698                 if (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
699                      PARSING_FLAGS_VLAN)
700                         __vlan_hwaccel_put_tag(skb,
701                                 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
702                 napi_gro_receive(&fp->napi, skb);
703
704
705 next_rx:
706                 rx_buf->skb = NULL;
707
708                 bd_cons = NEXT_RX_IDX(bd_cons);
709                 bd_prod = NEXT_RX_IDX(bd_prod);
710                 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
711                 rx_pkt++;
712 next_cqe:
713                 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
714                 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
715
716                 if (rx_pkt == budget)
717                         break;
718         } /* while */
719
720         fp->rx_bd_cons = bd_cons;
721         fp->rx_bd_prod = bd_prod_fw;
722         fp->rx_comp_cons = sw_comp_cons;
723         fp->rx_comp_prod = sw_comp_prod;
724
725         /* Update producers */
726         bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
727                              fp->rx_sge_prod);
728
729         fp->rx_pkt += rx_pkt;
730         fp->rx_calls++;
731
732         return rx_pkt;
733 }
734
735 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
736 {
737         struct bnx2x_fastpath *fp = fp_cookie;
738         struct bnx2x *bp = fp->bp;
739
740         /* Return here if interrupt is disabled */
741         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
742                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
743                 return IRQ_HANDLED;
744         }
745
746         DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB "
747                          "[fp %d fw_sd %d igusb %d]\n",
748            fp->index, fp->fw_sb_id, fp->igu_sb_id);
749         bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
750
751 #ifdef BNX2X_STOP_ON_ERROR
752         if (unlikely(bp->panic))
753                 return IRQ_HANDLED;
754 #endif
755
756         /* Handle Rx and Tx according to MSI-X vector */
757         prefetch(fp->rx_cons_sb);
758         prefetch(fp->tx_cons_sb);
759         prefetch(&fp->sb_running_index[SM_RX_ID]);
760         napi_schedule(&bnx2x_fp(bp, fp->index, napi));
761
762         return IRQ_HANDLED;
763 }
764
765 /* HW Lock for shared dual port PHYs */
766 void bnx2x_acquire_phy_lock(struct bnx2x *bp)
767 {
768         mutex_lock(&bp->port.phy_mutex);
769
770         if (bp->port.need_hw_lock)
771                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
772 }
773
774 void bnx2x_release_phy_lock(struct bnx2x *bp)
775 {
776         if (bp->port.need_hw_lock)
777                 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
778
779         mutex_unlock(&bp->port.phy_mutex);
780 }
781
782 /* calculates MF speed according to current linespeed and MF configuration */
783 u16 bnx2x_get_mf_speed(struct bnx2x *bp)
784 {
785         u16 line_speed = bp->link_vars.line_speed;
786         if (IS_MF(bp)) {
787                 u16 maxCfg = bnx2x_extract_max_cfg(bp,
788                                                    bp->mf_config[BP_VN(bp)]);
789
790                 /* Calculate the current MAX line speed limit for the MF
791                  * devices
792                  */
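                /* Note: maxCfg is interpreted per mode - a percentage of
                 * the line speed in SI mode, and a rate in units of
                 * 100 Mbps in SD mode.
                 */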
793                 if (IS_MF_SI(bp))
794                         line_speed = (line_speed * maxCfg) / 100;
795                 else { /* SD mode */
796                         u16 vn_max_rate = maxCfg * 100;
797
798                         if (vn_max_rate < line_speed)
799                                 line_speed = vn_max_rate;
800                 }
801         }
802
803         return line_speed;
804 }
805
806 /**
807  * bnx2x_fill_report_data - fill link report data to report
808  *
809  * @bp:         driver handle
810  * @data:       link state to update
811  *
812  * It uses non-atomic bit operations because it is called under the mutex.
813  */
814 static inline void bnx2x_fill_report_data(struct bnx2x *bp,
815                                           struct bnx2x_link_report_data *data)
816 {
817         u16 line_speed = bnx2x_get_mf_speed(bp);
818
819         memset(data, 0, sizeof(*data));
820
821         /* Fill the report data: effective line speed */
822         data->line_speed = line_speed;
823
824         /* Link is down */
825         if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
826                 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
827                           &data->link_report_flags);
828
829         /* Full DUPLEX */
830         if (bp->link_vars.duplex == DUPLEX_FULL)
831                 __set_bit(BNX2X_LINK_REPORT_FD, &data->link_report_flags);
832
833         /* Rx Flow Control is ON */
834         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
835                 __set_bit(BNX2X_LINK_REPORT_RX_FC_ON, &data->link_report_flags);
836
837         /* Tx Flow Control is ON */
838         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
839                 __set_bit(BNX2X_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
840 }
841
842 /**
843  * bnx2x_link_report - report link status to OS.
844  *
845  * @bp:         driver handle
846  *
847  * Calls the __bnx2x_link_report() under the same locking scheme
848  * as a link/PHY state managing code to ensure a consistent link
849  * reporting.
850  */
851
852 void bnx2x_link_report(struct bnx2x *bp)
853 {
854         bnx2x_acquire_phy_lock(bp);
855         __bnx2x_link_report(bp);
856         bnx2x_release_phy_lock(bp);
857 }
858
859 /**
860  * __bnx2x_link_report - report link status to OS.
861  *
862  * @bp:         driver handle
863  *
864  * Non-atomic implementation.
865  * Should be called under the phy_lock.
866  */
867 void __bnx2x_link_report(struct bnx2x *bp)
868 {
869         struct bnx2x_link_report_data cur_data;
870
871         /* reread mf_cfg */
872         if (!CHIP_IS_E1(bp))
873                 bnx2x_read_mf_cfg(bp);
874
875         /* Read the current link report info */
876         bnx2x_fill_report_data(bp, &cur_data);
877
878         /* Don't report link down or exactly the same link status twice */
879         if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
880             (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
881                       &bp->last_reported_link.link_report_flags) &&
882              test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
883                       &cur_data.link_report_flags)))
884                 return;
885
886         bp->link_cnt++;
887
888         /* We are going to report new link parameters now -
889          * remember the current data for the next time.
890          */
891         memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));
892
893         if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
894                      &cur_data.link_report_flags)) {
895                 netif_carrier_off(bp->dev);
896                 netdev_err(bp->dev, "NIC Link is Down\n");
897                 return;
898         } else {
899                 netif_carrier_on(bp->dev);
900                 netdev_info(bp->dev, "NIC Link is Up, ");
901                 pr_cont("%d Mbps ", cur_data.line_speed);
902
903                 if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
904                                        &cur_data.link_report_flags))
905                         pr_cont("full duplex");
906                 else
907                         pr_cont("half duplex");
908
909                 /* Handle the FC at the end so that only these flags would be
910                  * possibly set. This way we may easily check if there is no FC
911                  * enabled.
912                  */
913                 if (cur_data.link_report_flags) {
914                         if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
915                                      &cur_data.link_report_flags)) {
916                                 pr_cont(", receive ");
917                                 if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
918                                      &cur_data.link_report_flags))
919                                         pr_cont("& transmit ");
920                         } else {
921                                 pr_cont(", transmit ");
922                         }
923                         pr_cont("flow control ON");
924                 }
925                 pr_cont("\n");
926         }
927 }
928
929 void bnx2x_init_rx_rings(struct bnx2x *bp)
930 {
931         int func = BP_FUNC(bp);
932         int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
933                                               ETH_MAX_AGGREGATION_QUEUES_E1H;
934         u16 ring_prod;
935         int i, j;
936
937         /* Allocate TPA resources */
938         for_each_rx_queue(bp, j) {
939                 struct bnx2x_fastpath *fp = &bp->fp[j];
940
941                 DP(NETIF_MSG_IFUP,
942                    "mtu %d  rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
943
944                 if (!fp->disable_tpa) {
945                         /* Fill the per-aggregation pool */
946                         for (i = 0; i < max_agg_queues; i++) {
947                                 fp->tpa_pool[i].skb =
948                                    netdev_alloc_skb(bp->dev, fp->rx_buf_size);
949                                 if (!fp->tpa_pool[i].skb) {
950                                         BNX2X_ERR("Failed to allocate TPA "
951                                                   "skb pool for queue[%d] - "
952                                                   "disabling TPA on this "
953                                                   "queue!\n", j);
954                                         bnx2x_free_tpa_pool(bp, fp, i);
955                                         fp->disable_tpa = 1;
956                                         break;
957                                 }
958                                 dma_unmap_addr_set((struct sw_rx_bd *)
959                                                         &bp->fp->tpa_pool[i],
960                                                    mapping, 0);
961                                 fp->tpa_state[i] = BNX2X_TPA_STOP;
962                         }
963
964                         /* "next page" elements initialization */
965                         bnx2x_set_next_page_sgl(fp);
966
967                         /* set SGEs bit mask */
968                         bnx2x_init_sge_ring_bit_mask(fp);
969
970                         /* Allocate SGEs and initialize the ring elements */
971                         for (i = 0, ring_prod = 0;
972                              i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
973
974                                 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
975                                         BNX2X_ERR("was only able to allocate "
976                                                   "%d rx sges\n", i);
977                                         BNX2X_ERR("disabling TPA for"
978                                                   " queue[%d]\n", j);
979                                         /* Cleanup already allocated elements */
980                                         bnx2x_free_rx_sge_range(bp,
981                                                                 fp, ring_prod);
982                                         bnx2x_free_tpa_pool(bp,
983                                                             fp, max_agg_queues);
984                                         fp->disable_tpa = 1;
985                                         ring_prod = 0;
986                                         break;
987                                 }
988                                 ring_prod = NEXT_SGE_IDX(ring_prod);
989                         }
990
991                         fp->rx_sge_prod = ring_prod;
992                 }
993         }
994
995         for_each_rx_queue(bp, j) {
996                 struct bnx2x_fastpath *fp = &bp->fp[j];
997
998                 fp->rx_bd_cons = 0;
999
1000                 /* Activate BD ring */
1001                 /* Warning!
1002                  * this will generate an interrupt (to the TSTORM)
1003                  * must only be done after chip is initialized
1004                  */
1005                 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1006                                      fp->rx_sge_prod);
1007
1008                 if (j != 0)
1009                         continue;
1010
1011                 if (!CHIP_IS_E2(bp)) {
1012                         REG_WR(bp, BAR_USTRORM_INTMEM +
1013                                USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
1014                                U64_LO(fp->rx_comp_mapping));
1015                         REG_WR(bp, BAR_USTRORM_INTMEM +
1016                                USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
1017                                U64_HI(fp->rx_comp_mapping));
1018                 }
1019         }
1020 }
1021
1022 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
1023 {
1024         int i;
1025
1026         for_each_tx_queue(bp, i) {
1027                 struct bnx2x_fastpath *fp = &bp->fp[i];
1028
1029                 u16 bd_cons = fp->tx_bd_cons;
1030                 u16 sw_prod = fp->tx_pkt_prod;
1031                 u16 sw_cons = fp->tx_pkt_cons;
1032
1033                 while (sw_cons != sw_prod) {
1034                         bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
1035                         sw_cons++;
1036                 }
1037         }
1038 }
1039
1040 static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
1041 {
1042         struct bnx2x *bp = fp->bp;
1043         int i;
1044
1045         /* ring wasn't allocated */
1046         if (fp->rx_buf_ring == NULL)
1047                 return;
1048
1049         for (i = 0; i < NUM_RX_BD; i++) {
1050                 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
1051                 struct sk_buff *skb = rx_buf->skb;
1052
1053                 if (skb == NULL)
1054                         continue;
1055
1056                 dma_unmap_single(&bp->pdev->dev,
1057                                  dma_unmap_addr(rx_buf, mapping),
1058                                  fp->rx_buf_size, DMA_FROM_DEVICE);
1059
1060                 rx_buf->skb = NULL;
1061                 dev_kfree_skb(skb);
1062         }
1063 }
1064
1065 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1066 {
1067         int j;
1068
1069         for_each_rx_queue(bp, j) {
1070                 struct bnx2x_fastpath *fp = &bp->fp[j];
1071
1072                 bnx2x_free_rx_bds(fp);
1073
1074                 if (!fp->disable_tpa)
1075                         bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
1076                                             ETH_MAX_AGGREGATION_QUEUES_E1 :
1077                                             ETH_MAX_AGGREGATION_QUEUES_E1H);
1078         }
1079 }
1080
1081 void bnx2x_free_skbs(struct bnx2x *bp)
1082 {
1083         bnx2x_free_tx_skbs(bp);
1084         bnx2x_free_rx_skbs(bp);
1085 }
1086
1087 void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
1088 {
1089         /* load old values */
1090         u32 mf_cfg = bp->mf_config[BP_VN(bp)];
1091
1092         if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
1093                 /* leave all but MAX value */
1094                 mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
1095
1096                 /* set new MAX value */
1097                 mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
1098                                 & FUNC_MF_CFG_MAX_BW_MASK;
1099
1100                 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
1101         }
1102 }
1103
1104 static void bnx2x_free_msix_irqs(struct bnx2x *bp)
1105 {
1106         int i, offset = 1;
1107
1108         free_irq(bp->msix_table[0].vector, bp->dev);
1109         DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
1110            bp->msix_table[0].vector);
1111
1112 #ifdef BCM_CNIC
1113         offset++;
1114 #endif
1115         for_each_eth_queue(bp, i) {
1116                 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq  "
1117                    "state %x\n", i, bp->msix_table[i + offset].vector,
1118                    bnx2x_fp(bp, i, state));
1119
1120                 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
1121         }
1122 }
1123
1124 void bnx2x_free_irq(struct bnx2x *bp)
1125 {
1126         if (bp->flags & USING_MSIX_FLAG)
1127                 bnx2x_free_msix_irqs(bp);
1128         else if (bp->flags & USING_MSI_FLAG)
1129                 free_irq(bp->pdev->irq, bp->dev);
1130         else
1131                 free_irq(bp->pdev->irq, bp->dev);
1132 }
1133
1134 int bnx2x_enable_msix(struct bnx2x *bp)
1135 {
1136         int msix_vec = 0, i, rc, req_cnt;
1137
1138         bp->msix_table[msix_vec].entry = msix_vec;
1139         DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n",
1140            bp->msix_table[0].entry);
1141         msix_vec++;
1142
1143 #ifdef BCM_CNIC
1144         bp->msix_table[msix_vec].entry = msix_vec;
1145         DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d (CNIC)\n",
1146            bp->msix_table[msix_vec].entry, bp->msix_table[msix_vec].entry);
1147         msix_vec++;
1148 #endif
1149         for_each_eth_queue(bp, i) {
1150                 bp->msix_table[msix_vec].entry = msix_vec;
1151                 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
1152                    "(fastpath #%u)\n", msix_vec, msix_vec, i);
1153                 msix_vec++;
1154         }
1155
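        /* Vector layout: entry 0 is the slowpath vector, followed by an
         * optional CNIC vector and one vector per ethernet queue.
         */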
1156         req_cnt = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_CONTEXT_USE + 1;
1157
1158         rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], req_cnt);
1159
1160         /*
1161          * reconfigure number of tx/rx queues according to available
1162          * MSI-X vectors
1163          */
1164         if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
1165                 /* how many fewer vectors will we have? */
1166                 int diff = req_cnt - rc;
1167
1168                 DP(NETIF_MSG_IFUP,
1169                    "Trying to use less MSI-X vectors: %d\n", rc);
1170
1171                 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
1172
1173                 if (rc) {
1174                         DP(NETIF_MSG_IFUP,
1175                            "MSI-X is not attainable  rc %d\n", rc);
1176                         return rc;
1177                 }
1178                 /*
1179                  * decrease number of queues by number of unallocated entries
1180                  */
1181                 bp->num_queues -= diff;
1182
1183                 DP(NETIF_MSG_IFUP, "New queue configuration set: %d\n",
1184                                   bp->num_queues);
1185         } else if (rc) {
1186                 /* fall back to INTx if not enough memory */
1187                 if (rc == -ENOMEM)
1188                         bp->flags |= DISABLE_MSI_FLAG;
1189                 DP(NETIF_MSG_IFUP, "MSI-X is not attainable  rc %d\n", rc);
1190                 return rc;
1191         }
1192
1193         bp->flags |= USING_MSIX_FLAG;
1194
1195         return 0;
1196 }
1197
1198 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1199 {
1200         int i, rc, offset = 1;
1201
1202         rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
1203                          bp->dev->name, bp->dev);
1204         if (rc) {
1205                 BNX2X_ERR("request sp irq failed\n");
1206                 return -EBUSY;
1207         }
1208
1209 #ifdef BCM_CNIC
1210         offset++;
1211 #endif
1212         for_each_eth_queue(bp, i) {
1213                 struct bnx2x_fastpath *fp = &bp->fp[i];
1214                 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1215                          bp->dev->name, i);
1216
1217                 rc = request_irq(bp->msix_table[offset].vector,
1218                                  bnx2x_msix_fp_int, 0, fp->name, fp);
1219                 if (rc) {
1220                         BNX2X_ERR("request fp #%d irq failed  rc %d\n", i, rc);
1221                         bnx2x_free_msix_irqs(bp);
1222                         return -EBUSY;
1223                 }
1224
1225                 offset++;
1226                 fp->state = BNX2X_FP_STATE_IRQ;
1227         }
1228
1229         i = BNX2X_NUM_ETH_QUEUES(bp);
1230         offset = 1 + CNIC_CONTEXT_USE;
1231         netdev_info(bp->dev, "using MSI-X  IRQs: sp %d  fp[%d] %d"
1232                " ... fp[%d] %d\n",
1233                bp->msix_table[0].vector,
1234                0, bp->msix_table[offset].vector,
1235                i - 1, bp->msix_table[offset + i - 1].vector);
1236
1237         return 0;
1238 }
1239
1240 int bnx2x_enable_msi(struct bnx2x *bp)
1241 {
1242         int rc;
1243
1244         rc = pci_enable_msi(bp->pdev);
1245         if (rc) {
1246                 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
1247                 return -1;
1248         }
1249         bp->flags |= USING_MSI_FLAG;
1250
1251         return 0;
1252 }
1253
1254 static int bnx2x_req_irq(struct bnx2x *bp)
1255 {
1256         unsigned long flags;
1257         int rc;
1258
1259         if (bp->flags & USING_MSI_FLAG)
1260                 flags = 0;
1261         else
1262                 flags = IRQF_SHARED;
1263
1264         rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
1265                          bp->dev->name, bp->dev);
1266         if (!rc)
1267                 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
1268
1269         return rc;
1270 }
1271
1272 static void bnx2x_napi_enable(struct bnx2x *bp)
1273 {
1274         int i;
1275
1276         for_each_napi_queue(bp, i)
1277                 napi_enable(&bnx2x_fp(bp, i, napi));
1278 }
1279
1280 static void bnx2x_napi_disable(struct bnx2x *bp)
1281 {
1282         int i;
1283
1284         for_each_napi_queue(bp, i)
1285                 napi_disable(&bnx2x_fp(bp, i, napi));
1286 }
1287
1288 void bnx2x_netif_start(struct bnx2x *bp)
1289 {
1290         int intr_sem;
1291
1292         intr_sem = atomic_dec_and_test(&bp->intr_sem);
1293         smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
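        /* atomic_dec_and_test() returns true only when intr_sem drops to
         * zero, i.e. when the last interrupt-disable request has been
         * released - only then is it safe to re-enable NAPI and HW
         * interrupts below.
         */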
1294
1295         if (intr_sem) {
1296                 if (netif_running(bp->dev)) {
1297                         bnx2x_napi_enable(bp);
1298                         bnx2x_int_enable(bp);
1299                         if (bp->state == BNX2X_STATE_OPEN)
1300                                 netif_tx_wake_all_queues(bp->dev);
1301                 }
1302         }
1303 }
1304
1305 void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1306 {
1307         bnx2x_int_disable_sync(bp, disable_hw);
1308         bnx2x_napi_disable(bp);
1309         netif_tx_disable(bp->dev);
1310 }
1311
1312 u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
1313 {
1314 #ifdef BCM_CNIC
1315         struct bnx2x *bp = netdev_priv(dev);
1316         if (NO_FCOE(bp))
1317                 return skb_tx_hash(dev, skb);
1318         else {
1319                 struct ethhdr *hdr = (struct ethhdr *)skb->data;
1320                 u16 ether_type = ntohs(hdr->h_proto);
1321
1322                 /* Skip VLAN tag if present */
1323                 if (ether_type == ETH_P_8021Q) {
1324                         struct vlan_ethhdr *vhdr =
1325                                 (struct vlan_ethhdr *)skb->data;
1326
1327                         ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1328                 }
1329
1330                 /* If ethertype is FCoE or FIP - use FCoE ring */
1331                 if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
1332                         return bnx2x_fcoe(bp, index);
1333         }
1334 #endif
1335         /* Select a non-FCoE queue: if FCoE is enabled, exclude the FCoE L2 ring
1336          */
1337         return __skb_tx_hash(dev, skb,
1338                         dev->real_num_tx_queues - FCOE_CONTEXT_USE);
1339 }
1340
1341 void bnx2x_set_num_queues(struct bnx2x *bp)
1342 {
1343         switch (bp->multi_mode) {
1344         case ETH_RSS_MODE_DISABLED:
1345                 bp->num_queues = 1;
1346                 break;
1347         case ETH_RSS_MODE_REGULAR:
1348                 bp->num_queues = bnx2x_calc_num_queues(bp);
1349                 break;
1350
1351         default:
1352                 bp->num_queues = 1;
1353                 break;
1354         }
1355
1356         /* Add special queues */
1357         bp->num_queues += NONE_ETH_CONTEXT_USE;
1358 }
1359
1360 #ifdef BCM_CNIC
1361 static inline void bnx2x_set_fcoe_eth_macs(struct bnx2x *bp)
1362 {
1363         if (!NO_FCOE(bp)) {
1364                 if (!IS_MF_SD(bp))
1365                         bnx2x_set_fip_eth_mac_addr(bp, 1);
1366                 bnx2x_set_all_enode_macs(bp, 1);
1367                 bp->flags |= FCOE_MACS_SET;
1368         }
1369 }
1370 #endif
1371
1372 static void bnx2x_release_firmware(struct bnx2x *bp)
1373 {
1374         kfree(bp->init_ops_offsets);
1375         kfree(bp->init_ops);
1376         kfree(bp->init_data);
1377         release_firmware(bp->firmware);
1378 }
1379
1380 static inline int bnx2x_set_real_num_queues(struct bnx2x *bp)
1381 {
1382         int rc, num = bp->num_queues;
1383
1384 #ifdef BCM_CNIC
1385         if (NO_FCOE(bp))
1386                 num -= FCOE_CONTEXT_USE;
1387
1388 #endif
1389         netif_set_real_num_tx_queues(bp->dev, num);
1390         rc = netif_set_real_num_rx_queues(bp->dev, num);
1391         return rc;
1392 }
1393
1394 static inline void bnx2x_set_rx_buf_size(struct bnx2x *bp)
1395 {
1396         int i;
1397
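        /* Each Rx buffer must hold an MTU-sized frame plus the Ethernet
         * overhead, the receive-buffer alignment and the padding used to
         * align the IP header.
         */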
1398         for_each_queue(bp, i) {
1399                 struct bnx2x_fastpath *fp = &bp->fp[i];
1400
1401                 /* Always use a mini-jumbo MTU for the FCoE L2 ring */
1402                 if (IS_FCOE_IDX(i))
1403                         /*
1404                          * Although no IP frames are expected to arrive on
1405                          * this ring, we still want to add
1406                          * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
1407                          * overrun attack.
1408                          */
1409                         fp->rx_buf_size =
1410                                 BNX2X_FCOE_MINI_JUMBO_MTU + ETH_OVREHEAD +
1411                                 BNX2X_RX_ALIGN + IP_HEADER_ALIGNMENT_PADDING;
1412                 else
1413                         fp->rx_buf_size =
1414                                 bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN +
1415                                 IP_HEADER_ALIGNMENT_PADDING;
1416         }
1417 }
1418
1419 /* must be called with rtnl_lock */
1420 int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
1421 {
1422         u32 load_code;
1423         int i, rc;
1424
1425         /* Set init arrays */
1426         rc = bnx2x_init_firmware(bp);
1427         if (rc) {
1428                 BNX2X_ERR("Error loading firmware\n");
1429                 return rc;
1430         }
1431
1432 #ifdef BNX2X_STOP_ON_ERROR
1433         if (unlikely(bp->panic))
1434                 return -EPERM;
1435 #endif
1436
1437         bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
1438
1439         /* Set the initial link reported state to link down */
1440         bnx2x_acquire_phy_lock(bp);
1441         memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
1442         __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1443                 &bp->last_reported_link.link_report_flags);
1444         bnx2x_release_phy_lock(bp);
1445
1446         /* must be called before memory allocation and HW init */
1447         bnx2x_ilt_set_info(bp);
1448
1449         /* zero fastpath structures preserving invariants like napi which are
1450          * allocated only once
1451          */
1452         for_each_queue(bp, i)
1453                 bnx2x_bz_fp(bp, i);
1454
1455         /* Set the receive queues buffer size */
1456         bnx2x_set_rx_buf_size(bp);
1457
1458         for_each_queue(bp, i)
1459                 bnx2x_fp(bp, i, disable_tpa) =
1460                                         ((bp->flags & TPA_ENABLE_FLAG) == 0);
1461
1462 #ifdef BCM_CNIC
1463         /* We don't want TPA on FCoE L2 ring */
1464         bnx2x_fcoe(bp, disable_tpa) = 1;
1465 #endif
1466
1467         if (bnx2x_alloc_mem(bp))
1468                 return -ENOMEM;
1469
1470         /* Since bnx2x_alloc_mem() may update
1471          * bp->num_queues, bnx2x_set_real_num_queues() must always
1472          * come after it.
1473          */
1474         rc = bnx2x_set_real_num_queues(bp);
1475         if (rc) {
1476                 BNX2X_ERR("Unable to set real_num_queues\n");
1477                 goto load_error0;
1478         }
1479
1480         bnx2x_napi_enable(bp);
1481
1482         /* Send the LOAD_REQUEST command to the MCP.
1483            The MCP replies with the type of LOAD command:
1484            if this is the first port to be initialized,
1485            the common blocks must be initialized as well; otherwise not.
1486         */
1487         if (!BP_NOMCP(bp)) {
1488                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0);
1489                 if (!load_code) {
1490                         BNX2X_ERR("MCP response failure, aborting\n");
1491                         rc = -EBUSY;
1492                         goto load_error1;
1493                 }
1494                 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
1495                         rc = -EBUSY; /* other port in diagnostic mode */
1496                         goto load_error1;
1497                 }
1498
1499         } else {
1500                 int path = BP_PATH(bp);
1501                 int port = BP_PORT(bp);
1502
1503                 DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d]      %d, %d, %d\n",
1504                    path, load_count[path][0], load_count[path][1],
1505                    load_count[path][2]);
1506                 load_count[path][0]++;
1507                 load_count[path][1 + port]++;
1508                 DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d]  %d, %d, %d\n",
1509                    path, load_count[path][0], load_count[path][1],
1510                    load_count[path][2]);
1511                 if (load_count[path][0] == 1)
1512                         load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
1513                 else if (load_count[path][1 + port] == 1)
1514                         load_code = FW_MSG_CODE_DRV_LOAD_PORT;
1515                 else
1516                         load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
1517         }
1518
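             /* load_code tells this function how much initialization it owns:
              * COMMON (first function to load on the path), PORT (first on the
              * port) or FUNCTION only.  A function that got COMMON, COMMON_CHIP
              * or PORT also becomes the PMF (port management function) and
              * handles per-port work such as the initial PHY init done further
              * below.
              */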
1519         if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
1520             (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
1521             (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
1522                 bp->port.pmf = 1;
1523         else
1524                 bp->port.pmf = 0;
1525         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
1526
1527         /* Initialize HW */
1528         rc = bnx2x_init_hw(bp, load_code);
1529         if (rc) {
1530                 BNX2X_ERR("HW init failed, aborting\n");
1531                 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
1532                 goto load_error2;
1533         }
1534
1535         /* Connect to IRQs */
1536         rc = bnx2x_setup_irqs(bp);
1537         if (rc) {
1538                 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
1539                 goto load_error2;
1540         }
1541
1542         /* Setup NIC internals and enable interrupts */
1543         bnx2x_nic_init(bp, load_code);
1544
1545         if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
1546             (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
1547             (bp->common.shmem2_base))
1548                 SHMEM2_WR(bp, dcc_support,
1549                           (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
1550                            SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
1551
1552         /* Send LOAD_DONE command to MCP */
1553         if (!BP_NOMCP(bp)) {
1554                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
1555                 if (!load_code) {
1556                         BNX2X_ERR("MCP response failure, aborting\n");
1557                         rc = -EBUSY;
1558                         goto load_error3;
1559                 }
1560         }
1561
1562         bnx2x_dcbx_init(bp);
1563
1564         bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
1565
1566         rc = bnx2x_func_start(bp);
1567         if (rc) {
1568                 BNX2X_ERR("Function start failed!\n");
1569 #ifndef BNX2X_STOP_ON_ERROR
1570                 goto load_error3;
1571 #else
1572                 bp->panic = 1;
1573                 return -EBUSY;
1574 #endif
1575         }
1576
1577         rc = bnx2x_setup_client(bp, &bp->fp[0], 1 /* Leading */);
1578         if (rc) {
1579                 BNX2X_ERR("Setup leading failed!\n");
1580 #ifndef BNX2X_STOP_ON_ERROR
1581                 goto load_error3;
1582 #else
1583                 bp->panic = 1;
1584                 return -EBUSY;
1585 #endif
1586         }
1587
1588         if (!CHIP_IS_E1(bp) &&
1589             (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED)) {
1590                 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
1591                 bp->flags |= MF_FUNC_DIS;
1592         }
1593
1594 #ifdef BCM_CNIC
1595         /* Enable Timer scan */
1596         REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
1597 #endif
1598
1599         for_each_nondefault_queue(bp, i) {
1600                 rc = bnx2x_setup_client(bp, &bp->fp[i], 0);
1601                 if (rc)
1602 #ifdef BCM_CNIC
1603                         goto load_error4;
1604 #else
1605                         goto load_error3;
1606 #endif
1607         }
1608
1609         /* Now that the clients are configured we are ready to work */
1610         bp->state = BNX2X_STATE_OPEN;
1611
1612 #ifdef BCM_CNIC
1613         bnx2x_set_fcoe_eth_macs(bp);
1614 #endif
1615
1616         bnx2x_set_eth_mac(bp, 1);
1617
1618         /* Clear MC configuration */
1619         if (CHIP_IS_E1(bp))
1620                 bnx2x_invalidate_e1_mc_list(bp);
1621         else
1622                 bnx2x_invalidate_e1h_mc_list(bp);
1623
1624         /* Clear UC lists configuration */
1625         bnx2x_invalidate_uc_list(bp);
1626
1627         if (bp->pending_max) {
1628                 bnx2x_update_max_mf_config(bp, bp->pending_max);
1629                 bp->pending_max = 0;
1630         }
1631
1632         if (bp->port.pmf)
1633                 bnx2x_initial_phy_init(bp, load_mode);
1634
1635         /* Initialize Rx filtering */
1636         bnx2x_set_rx_mode(bp->dev);
1637
1638         /* Start fast path */
1639         switch (load_mode) {
1640         case LOAD_NORMAL:
1641                 /* Tx queues only need to be re-enabled */
1642                 netif_tx_wake_all_queues(bp->dev);
1643                 /* Initialize the receive filter. */
1644                 break;
1645
1646         case LOAD_OPEN:
1647                 netif_tx_start_all_queues(bp->dev);
1648                 smp_mb__after_clear_bit();
1649                 break;
1650
1651         case LOAD_DIAG:
1652                 bp->state = BNX2X_STATE_DIAG;
1653                 break;
1654
1655         default:
1656                 break;
1657         }
1658
1659         if (!bp->port.pmf)
1660                 bnx2x__link_status_update(bp);
1661
1662         /* start the timer */
1663         mod_timer(&bp->timer, jiffies + bp->current_interval);
1664
1665 #ifdef BCM_CNIC
1666         bnx2x_setup_cnic_irq_info(bp);
1667         if (bp->state == BNX2X_STATE_OPEN)
1668                 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
1669 #endif
1670         bnx2x_inc_load_cnt(bp);
1671
1672         bnx2x_release_firmware(bp);
1673
1674         return 0;
1675
1676 #ifdef BCM_CNIC
1677 load_error4:
1678         /* Disable Timer scan */
1679         REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0);
1680 #endif
1681 load_error3:
1682         bnx2x_int_disable_sync(bp, 1);
1683
1684         /* Free SKBs, SGEs, TPA pool and driver internals */
1685         bnx2x_free_skbs(bp);
1686         for_each_rx_queue(bp, i)
1687                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
1688
1689         /* Release IRQs */
1690         bnx2x_free_irq(bp);
1691 load_error2:
1692         if (!BP_NOMCP(bp)) {
1693                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
1694                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
1695         }
1696
1697         bp->port.pmf = 0;
1698 load_error1:
1699         bnx2x_napi_disable(bp);
1700 load_error0:
1701         bnx2x_free_mem(bp);
1702
1703         bnx2x_release_firmware(bp);
1704
1705         return rc;
1706 }
1707
1708 /* must be called with rtnl_lock */
1709 int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
1710 {
1711         int i;
1712
1713         if (bp->state == BNX2X_STATE_CLOSED) {
1714                 /* Interface has been removed - nothing to recover */
1715                 bp->recovery_state = BNX2X_RECOVERY_DONE;
1716                 bp->is_leader = 0;
1717                 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
1718                 smp_wmb();
1719
1720                 return -EINVAL;
1721         }
1722
1723 #ifdef BCM_CNIC
1724         bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
1725 #endif
1726         bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
1727
1728         /* Set "drop all" */
1729         bp->rx_mode = BNX2X_RX_MODE_NONE;
1730         bnx2x_set_storm_rx_mode(bp);
1731
1732         /* Stop Tx */
1733         bnx2x_tx_disable(bp);
1734
1735         del_timer_sync(&bp->timer);
1736
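             /* The timer that emits driver pulses has just been stopped, so
              * mark the driver "always alive" in shared memory to keep the MCP
              * from flagging a driver pulse timeout during the unload sequence.
              */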
1737         SHMEM_WR(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb,
1738                  (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
1739
1740         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
1741
1742         /* Cleanup the chip if needed */
1743         if (unload_mode != UNLOAD_RECOVERY)
1744                 bnx2x_chip_cleanup(bp, unload_mode);
1745         else {
1746                 /* Disable HW interrupts, NAPI and Tx */
1747                 bnx2x_netif_stop(bp, 1);
1748
1749                 /* Release IRQs */
1750                 bnx2x_free_irq(bp);
1751         }
1752
1753         bp->port.pmf = 0;
1754
1755         /* Free SKBs, SGEs, TPA pool and driver internals */
1756         bnx2x_free_skbs(bp);
1757         for_each_rx_queue(bp, i)
1758                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
1759
1760         bnx2x_free_mem(bp);
1761
1762         bp->state = BNX2X_STATE_CLOSED;
1763
1764         /* The last driver to unload must disable the "close the gate"
1765          * functionality if no parity attention or "process kill" is pending.
1766          */
1767         if ((!bnx2x_dec_load_cnt(bp)) && (!bnx2x_chk_parity_attn(bp)) &&
1768             bnx2x_reset_is_done(bp))
1769                 bnx2x_disable_close_the_gate(bp);
1770
1771         /* Reset the MCP mailbox sequence if a recovery is in progress */
1772         if (unload_mode == UNLOAD_RECOVERY)
1773                 bp->fw_seq = 0;
1774
1775         return 0;
1776 }
1777
1778 int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
1779 {
1780         u16 pmcsr;
1781
1782         /* If there is no power capability, silently succeed */
1783         if (!bp->pm_cap) {
1784                 DP(NETIF_MSG_HW, "No power capability. Breaking.\n");
1785                 return 0;
1786         }
1787
1788         pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
1789
1790         switch (state) {
1791         case PCI_D0:
1792                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
1793                                       ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
1794                                        PCI_PM_CTRL_PME_STATUS));
1795
1796                 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
1797                         /* delay required during transition out of D3hot */
1798                         msleep(20);
1799                 break;
1800
1801         case PCI_D3hot:
1802                 /* If there are other clients above, don't
1803                    shut down the power */
1804                 if (atomic_read(&bp->pdev->enable_cnt) != 1)
1805                         return 0;
1806                 /* Don't shut down the power for emulation and FPGA */
1807                 if (CHIP_REV_IS_SLOW(bp))
1808                         return 0;
1809
1810                 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
1811                 pmcsr |= 3;
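                     /* 3 is the D3hot encoding of the PCI_PM_CTRL_STATE_MASK field */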
1812
1813                 if (bp->wol)
1814                         pmcsr |= PCI_PM_CTRL_PME_ENABLE;
1815
1816                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
1817                                       pmcsr);
1818
1819                 /* No more memory access after this point until
1820                  * the device is brought back to D0.
1821                  */
1822                 break;
1823
1824         default:
1825                 return -EINVAL;
1826         }
1827         return 0;
1828 }
1829
1830 /*
1831  * net_device service functions
1832  */
1833 int bnx2x_poll(struct napi_struct *napi, int budget)
1834 {
1835         int work_done = 0;
1836         struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
1837                                                  napi);
1838         struct bnx2x *bp = fp->bp;
1839
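             /* Service Tx completions and Rx work until either the budget is
              * exhausted or no work is left; only when no work is left is NAPI
              * completed and (for non-FCoE rings) the status block acknowledged
              * to re-enable the IGU interrupt.
              */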
1840         while (1) {
1841 #ifdef BNX2X_STOP_ON_ERROR
1842                 if (unlikely(bp->panic)) {
1843                         napi_complete(napi);
1844                         return 0;
1845                 }
1846 #endif
1847
1848                 if (bnx2x_has_tx_work(fp))
1849                         bnx2x_tx_int(fp);
1850
1851                 if (bnx2x_has_rx_work(fp)) {
1852                         work_done += bnx2x_rx_int(fp, budget - work_done);
1853
1854                         /* must not complete if we consumed full budget */
1855                         if (work_done >= budget)
1856                                 break;
1857                 }
1858
1859                 /* Fall out from the NAPI loop if needed */
1860                 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
1861 #ifdef BCM_CNIC
1862                         /* No need to update the SB for the FCoE L2 ring as
1863                          * long as it is connected to the default SB and the
1864                          * SB was already updated when NAPI was scheduled.
1865                          */
1866                         if (IS_FCOE_FP(fp)) {
1867                                 napi_complete(napi);
1868                                 break;
1869                         }
1870 #endif
1871
1872                         bnx2x_update_fpsb_idx(fp);
1873                         /* bnx2x_has_rx_work() reads the status block, so
1874                          * we must make sure the status block indices have
1875                          * actually been read (bnx2x_update_fpsb_idx) before
1876                          * the bnx2x_has_rx_work() check below; otherwise we
1877                          * might write a "newer" status block value to the
1878                          * IGU: if a DMA hits right after bnx2x_has_rx_work()
1879                          * and there is no rmb, the memory read in
1880                          * bnx2x_update_fpsb_idx() may be postponed to just
1881                          * before bnx2x_ack_sb(). In that case no further
1882                          * interrupt will arrive until the status block is
1883                          * updated again, even though there is still
1884                          * unhandled work.
1885                          */
1886                         rmb();
1887
1888                         if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
1889                                 napi_complete(napi);
1890                                 /* Re-enable interrupts */
1891                                 DP(NETIF_MSG_HW,
1892                                    "Update index to %d\n", fp->fp_hc_idx);
1893                                 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
1894                                              le16_to_cpu(fp->fp_hc_idx),
1895                                              IGU_INT_ENABLE, 1);
1896                                 break;
1897                         }
1898                 }
1899         }
1900
1901         return work_done;
1902 }
1903
1904 /* We split the first BD into a headers BD and a data BD
1905  * to ease the pain of our fellow microcode engineers;
1906  * we use one DMA mapping for both BDs.
1907  * So far this has only been observed to happen
1908  * in Other Operating Systems(TM)
1909  */
1910 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
1911                                    struct bnx2x_fastpath *fp,
1912                                    struct sw_tx_bd *tx_buf,
1913                                    struct eth_tx_start_bd **tx_bd, u16 hlen,
1914                                    u16 bd_prod, int nbd)
1915 {
1916         struct eth_tx_start_bd *h_tx_bd = *tx_bd;
1917         struct eth_tx_bd *d_tx_bd;
1918         dma_addr_t mapping;
1919         int old_len = le16_to_cpu(h_tx_bd->nbytes);
1920
1921         /* first fix first BD */
1922         h_tx_bd->nbd = cpu_to_le16(nbd);
1923         h_tx_bd->nbytes = cpu_to_le16(hlen);
1924
1925         DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
1926            "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
1927            h_tx_bd->addr_lo, h_tx_bd->nbd);
1928
1929         /* now get a new data BD
1930          * (after the pbd) and fill it */
1931         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
1932         d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
1933
1934         mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
1935                            le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
1936
1937         d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1938         d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1939         d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
1940
1941         /* this marks the BD as one that has no individual mapping */
1942         tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
1943
1944         DP(NETIF_MSG_TX_QUEUED,
1945            "TSO split data size is %d (%x:%x)\n",
1946            d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
1947
1948         /* update tx_bd */
1949         *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
1950
1951         return bd_prod;
1952 }
1953
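     /* Adjust a partially computed checksum by the signed offset "fix": fold
      * the bytes just before the transport header out of the sum (fix > 0) or
      * the leading bytes of the transport header back in (fix < 0), and return
      * the result byte-swapped as it is stored in the parse BD.  Used for the
      * HW checksum fixup in bnx2x_set_pbd_csum().
      */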
1954 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
1955 {
1956         if (fix > 0)
1957                 csum = (u16) ~csum_fold(csum_sub(csum,
1958                                 csum_partial(t_header - fix, fix, 0)));
1959
1960         else if (fix < 0)
1961                 csum = (u16) ~csum_fold(csum_add(csum,
1962                                 csum_partial(t_header, -fix, 0)));
1963
1964         return swab16(csum);
1965 }
1966
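     /* Classify an skb for the Tx path: returns a bitmask of XMIT_* flags
      * describing the required checksum offload (IPv4/IPv6, TCP or not) and
      * the GSO type, which bnx2x_start_xmit() uses when building the BDs.
      */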
1967 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
1968 {
1969         u32 rc;
1970
1971         if (skb->ip_summed != CHECKSUM_PARTIAL)
1972                 rc = XMIT_PLAIN;
1973
1974         else {
1975                 if (vlan_get_protocol(skb) == htons(ETH_P_IPV6)) {
1976                         rc = XMIT_CSUM_V6;
1977                         if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
1978                                 rc |= XMIT_CSUM_TCP;
1979
1980                 } else {
1981                         rc = XMIT_CSUM_V4;
1982                         if (ip_hdr(skb)->protocol == IPPROTO_TCP)
1983                                 rc |= XMIT_CSUM_TCP;
1984                 }
1985         }
1986
1987         if (skb_is_gso_v6(skb))
1988                 rc |= XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6;
1989         else if (skb_is_gso(skb))
1990                 rc |= XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP;
1991
1992         return rc;
1993 }
1994
1995 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
1996 /* check if packet requires linearization (packet is too fragmented)
1997    no need to check fragmentation if page size > 8K (there will be no
1998    violation of FW restrictions) */
1999 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
2000                              u32 xmit_type)
2001 {
2002         int to_copy = 0;
2003         int hlen = 0;
2004         int first_bd_sz = 0;
2005
2006         /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
2007         if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
2008
2009                 if (xmit_type & XMIT_GSO) {
2010                         unsigned short lso_mss = skb_shinfo(skb)->gso_size;
2011                         /* Check if LSO packet needs to be copied:
2012                            3 = 1 (for headers BD) + 2 (for PBD and last BD) */
2013                         int wnd_size = MAX_FETCH_BD - 3;
2014                         /* Number of windows to check */
2015                         int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
2016                         int wnd_idx = 0;
2017                         int frag_idx = 0;
2018                         u32 wnd_sum = 0;
2019
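                             /* The checks below enforce that every window of
                              * wnd_size consecutive BDs carries at least one
                              * full MSS of payload, so the FW never needs more
                              * than wnd_size BDs to build a single LSO segment
                              * (e.g. if MAX_FETCH_BD were 13, every 10
                              * consecutive BDs would have to cover gso_size
                              * bytes); otherwise the skb must be linearized.
                              */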
2020                         /* Headers length */
2021                         hlen = (int)(skb_transport_header(skb) - skb->data) +
2022                                 tcp_hdrlen(skb);
2023
2024                         /* Amount of data (w/o headers) on linear part of SKB*/
2025                         first_bd_sz = skb_headlen(skb) - hlen;
2026
2027                         wnd_sum  = first_bd_sz;
2028
2029                         /* Calculate the first sum - it's special */
2030                         for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
2031                                 wnd_sum +=
2032                                         skb_shinfo(skb)->frags[frag_idx].size;
2033
2034                         /* If there was data in the linear part of the skb - check it */
2035                         if (first_bd_sz > 0) {
2036                                 if (unlikely(wnd_sum < lso_mss)) {
2037                                         to_copy = 1;
2038                                         goto exit_lbl;
2039                                 }
2040
2041                                 wnd_sum -= first_bd_sz;
2042                         }
2043
2044                         /* Others are easier: run through the frag list and
2045                            check all windows */
2046                         for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
2047                                 wnd_sum +=
2048                           skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
2049
2050                                 if (unlikely(wnd_sum < lso_mss)) {
2051                                         to_copy = 1;
2052                                         break;
2053                                 }
2054                                 wnd_sum -=
2055                                         skb_shinfo(skb)->frags[wnd_idx].size;
2056                         }
2057                 } else {
2058                         /* in the non-LSO case a too fragmented packet
2059                            should always be linearized */
2060                         to_copy = 1;
2061                 }
2062         }
2063
2064 exit_lbl:
2065         if (unlikely(to_copy))
2066                 DP(NETIF_MSG_TX_QUEUED,
2067                    "Linearization IS REQUIRED for %s packet. "
2068                    "num_frags %d  hlen %d  first_bd_sz %d\n",
2069                    (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
2070                    skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
2071
2072         return to_copy;
2073 }
2074 #endif
2075
2076 static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
2077                                         u32 xmit_type)
2078 {
2079         *parsing_data |= (skb_shinfo(skb)->gso_size <<
2080                               ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
2081                               ETH_TX_PARSE_BD_E2_LSO_MSS;
2082         if ((xmit_type & XMIT_GSO_V6) &&
2083             (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
2084                 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
2085 }
2086
2087 /**
2088  * bnx2x_set_pbd_gso - update PBD in GSO case.
2089  *
2090  * @skb:        packet skb
2091  * @pbd:        parse BD
2092  * @xmit_type:  xmit flags
2093  */
2094 static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
2095                                      struct eth_tx_parse_bd_e1x *pbd,
2096                                      u32 xmit_type)
2097 {
2098         pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2099         pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
2100         pbd->tcp_flags = pbd_tcp_flags(skb);
2101
2102         if (xmit_type & XMIT_GSO_V4) {
2103                 pbd->ip_id = swab16(ip_hdr(skb)->id);
2104                 pbd->tcp_pseudo_csum =
2105                         swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
2106                                                   ip_hdr(skb)->daddr,
2107                                                   0, IPPROTO_TCP, 0));
2108
2109         } else
2110                 pbd->tcp_pseudo_csum =
2111                         swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2112                                                 &ipv6_hdr(skb)->daddr,
2113                                                 0, IPPROTO_TCP, 0));
2114
2115         pbd->global_data |= ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN;
2116 }
2117
2118 /**
2119  * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
2120  *
2121  * @bp:                 driver handle
2122  * @skb:                packet skb
2123  * @parsing_data:       data to be updated
2124  * @xmit_type:          xmit flags
2125  *
2126  * 57712 related
2127  */
2128 static inline  u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
2129         u32 *parsing_data, u32 xmit_type)
2130 {
2131         *parsing_data |=
2132                         ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
2133                         ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) &
2134                         ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W;
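             /* The offset written above is in 16-bit words (hence the >> 1);
              * the TCP header length below is in 32-bit dwords (hence the / 4),
              * matching the _W and _DW suffixes of the parse BD fields.
              */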
2135
2136         if (xmit_type & XMIT_CSUM_TCP) {
2137                 *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
2138                         ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
2139                         ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
2140
2141                 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
2142         } else
2143                 /* We support checksum offload for TCP and UDP only.
2144                  * No need to pass the UDP header length - it's a constant.
2145                  */
2146                 return skb_transport_header(skb) +
2147                                 sizeof(struct udphdr) - skb->data;
2148 }
2149
2150 /**
2151  * bnx2x_set_pbd_csum - update PBD with checksum and return header length
2152  *
2153  * @bp:         driver handle
2154  * @skb:        packet skb
2155  * @pbd:        parse BD to be updated
2156  * @xmit_type:  xmit flags
2157  */
2158 static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
2159         struct eth_tx_parse_bd_e1x *pbd,
2160         u32 xmit_type)
2161 {
2162         u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
2163
2164         /* for now NS flag is not used in Linux */
2165         pbd->global_data =
2166                 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
2167                          ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
2168
2169         pbd->ip_hlen_w = (skb_transport_header(skb) -
2170                         skb_network_header(skb)) >> 1;
2171
2172         hlen += pbd->ip_hlen_w;
2173
2174         /* We support checksum offload for TCP and UDP only */
2175         if (xmit_type & XMIT_CSUM_TCP)
2176                 hlen += tcp_hdrlen(skb) / 2;
2177         else
2178                 hlen += sizeof(struct udphdr) / 2;
2179
2180         pbd->total_hlen_w = cpu_to_le16(hlen);
2181         hlen = hlen*2;
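             /* hlen was accumulated in 16-bit words for total_hlen_w above;
              * the caller expects a byte count, hence the doubling here.
              */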
2182
2183         if (xmit_type & XMIT_CSUM_TCP) {
2184                 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
2185
2186         } else {
2187                 s8 fix = SKB_CS_OFF(skb); /* signed! */
2188
2189                 DP(NETIF_MSG_TX_QUEUED,
2190                    "hlen %d  fix %d  csum before fix %x\n",
2191                    le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
2192
2193                 /* HW bug: fixup the CSUM */
2194                 pbd->tcp_pseudo_csum =
2195                         bnx2x_csum_fix(skb_transport_header(skb),
2196                                        SKB_CS(skb), fix);
2197
2198                 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
2199                    pbd->tcp_pseudo_csum);
2200         }
2201
2202         return hlen;
2203 }
2204
2205 /* called with netif_tx_lock
2206  * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
2207  * netif_wake_queue()
2208  */
2209 netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
2210 {
2211         struct bnx2x *bp = netdev_priv(dev);
2212         struct bnx2x_fastpath *fp;
2213         struct netdev_queue *txq;
2214         struct sw_tx_bd *tx_buf;
2215         struct eth_tx_start_bd *tx_start_bd;
2216         struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
2217         struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
2218         struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
2219         u32 pbd_e2_parsing_data = 0;
2220         u16 pkt_prod, bd_prod;
2221         int nbd, fp_index;
2222         dma_addr_t mapping;
2223         u32 xmit_type = bnx2x_xmit_type(bp, skb);
2224         int i;
2225         u8 hlen = 0;
2226         __le16 pkt_size = 0;
2227         struct ethhdr *eth;
2228         u8 mac_type = UNICAST_ADDRESS;
2229
2230 #ifdef BNX2X_STOP_ON_ERROR
2231         if (unlikely(bp->panic))
2232                 return NETDEV_TX_BUSY;
2233 #endif
2234
2235         fp_index = skb_get_queue_mapping(skb);
2236         txq = netdev_get_tx_queue(dev, fp_index);
2237
2238         fp = &bp->fp[fp_index];
2239
2240         if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
2241                 fp->eth_q_stats.driver_xoff++;
2242                 netif_tx_stop_queue(txq);
2243                 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
2244                 return NETDEV_TX_BUSY;
2245         }
2246
2247         DP(NETIF_MSG_TX_QUEUED, "queue[%d]: SKB: summed %x  protocol %x  "
2248                                 "protocol(%x,%x) gso type %x  xmit_type %x\n",
2249            fp_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
2250            ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
2251
2252         eth = (struct ethhdr *)skb->data;
2253
2254         /* set flag according to packet type (UNICAST_ADDRESS is default)*/
2255         if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
2256                 if (is_broadcast_ether_addr(eth->h_dest))
2257                         mac_type = BROADCAST_ADDRESS;
2258                 else
2259                         mac_type = MULTICAST_ADDRESS;
2260         }
2261
2262 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
2263         /* First, check if we need to linearize the skb (due to FW
2264            restrictions). No need to check fragmentation if page size > 8K
2265            (there will be no violation of FW restrictions) */
2266         if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
2267                 /* Statistics of linearization */
2268                 bp->lin_cnt++;
2269                 if (skb_linearize(skb) != 0) {
2270                         DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
2271                            "silently dropping this SKB\n");
2272                         dev_kfree_skb_any(skb);
2273                         return NETDEV_TX_OK;
2274                 }
2275         }
2276 #endif
2277
2278         /*
2279         Please read carefully. First we use one BD which we mark as start,
2280         then we have a parsing info BD (used for TSO or xsum),
2281         and only then we have the rest of the TSO BDs.
2282         (don't forget to mark the last one as last,
2283         and to unmap only AFTER you write to the BD ...)
2284         And above all, all pbd sizes are in words - NOT DWORDS!
2285         */
2286
2287         pkt_prod = fp->tx_pkt_prod++;
2288         bd_prod = TX_BD(fp->tx_bd_prod);
2289
2290         /* get a tx_buf and first BD */
2291         tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
2292         tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
2293
2294         tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
2295         SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_ETH_ADDR_TYPE,
2296                  mac_type);
2297
2298         /* header nbd */
2299         SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1);
2300
2301         /* remember the first BD of the packet */
2302         tx_buf->first_bd = fp->tx_bd_prod;
2303         tx_buf->skb = skb;
2304         tx_buf->flags = 0;
2305
2306         DP(NETIF_MSG_TX_QUEUED,
2307            "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
2308            pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
2309
2310         if (vlan_tx_tag_present(skb)) {
2311                 tx_start_bd->vlan_or_ethertype =
2312                     cpu_to_le16(vlan_tx_tag_get(skb));
2313                 tx_start_bd->bd_flags.as_bitfield |=
2314                     (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
2315         } else
2316                 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
2317
2318         /* turn on parsing and get a BD */
2319         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2320
2321         if (xmit_type & XMIT_CSUM) {
2322                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
2323
2324                 if (xmit_type & XMIT_CSUM_V4)
2325                         tx_start_bd->bd_flags.as_bitfield |=
2326                                                 ETH_TX_BD_FLAGS_IP_CSUM;
2327                 else
2328                         tx_start_bd->bd_flags.as_bitfield |=
2329                                                 ETH_TX_BD_FLAGS_IPV6;
2330
2331                 if (!(xmit_type & XMIT_CSUM_TCP))
2332                         tx_start_bd->bd_flags.as_bitfield |=
2333                                                 ETH_TX_BD_FLAGS_IS_UDP;
2334         }
2335
2336         if (CHIP_IS_E2(bp)) {
2337                 pbd_e2 = &fp->tx_desc_ring[bd_prod].parse_bd_e2;
2338                 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
2339                 /* Set PBD in checksum offload case */
2340                 if (xmit_type & XMIT_CSUM)
2341                         hlen = bnx2x_set_pbd_csum_e2(bp, skb,
2342                                                      &pbd_e2_parsing_data,
2343                                                      xmit_type);
2344         } else {
2345                 pbd_e1x = &fp->tx_desc_ring[bd_prod].parse_bd_e1x;
2346                 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
2347                 /* Set PBD in checksum offload case */
2348                 if (xmit_type & XMIT_CSUM)
2349                         hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
2350
2351         }
2352
2353         /* Map skb linear data for DMA */
2354         mapping = dma_map_single(&bp->pdev->dev, skb->data,
2355                                  skb_headlen(skb), DMA_TO_DEVICE);
2356
2357         /* Setup the data pointer of the first BD of the packet */
2358         tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2359         tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
2360         nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
2361         tx_start_bd->nbd = cpu_to_le16(nbd);
2362         tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
2363         pkt_size = tx_start_bd->nbytes;
2364
2365         DP(NETIF_MSG_TX_QUEUED, "first bd @%p  addr (%x:%x)  nbd %d"
2366            "  nbytes %d  flags %x  vlan %x\n",
2367            tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
2368            le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
2369            tx_start_bd->bd_flags.as_bitfield,
2370            le16_to_cpu(tx_start_bd->vlan_or_ethertype));
2371
2372         if (xmit_type & XMIT_GSO) {
2373
2374                 DP(NETIF_MSG_TX_QUEUED,
2375                    "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
2376                    skb->len, hlen, skb_headlen(skb),
2377                    skb_shinfo(skb)->gso_size);
2378
2379                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
2380
2381                 if (unlikely(skb_headlen(skb) > hlen))
2382                         bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
2383                                                  hlen, bd_prod, ++nbd);
2384                 if (CHIP_IS_E2(bp))
2385                         bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
2386                                              xmit_type);
2387                 else
2388                         bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
2389         }
2390
2391         /* Set the PBD's parsing_data field if not zero
2392          * (for the chips newer than 57711).
2393          */
2394         if (pbd_e2_parsing_data)
2395                 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
2396
2397         tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
2398
2399         /* Handle fragmented skb */
2400         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2401                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2402
2403                 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2404                 tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
2405                 if (total_pkt_bd == NULL)
2406                         total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
2407
2408                 mapping = dma_map_page(&bp->pdev->dev, frag->page,
2409                                        frag->page_offset,
2410                                        frag->size, DMA_TO_DEVICE);
2411
2412                 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2413                 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
2414                 tx_data_bd->nbytes = cpu_to_le16(frag->size);
2415                 le16_add_cpu(&pkt_size, frag->size);
2416
2417                 DP(NETIF_MSG_TX_QUEUED,
2418                    "frag %d  bd @%p  addr (%x:%x)  nbytes %d\n",
2419                    i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
2420                    le16_to_cpu(tx_data_bd->nbytes));
2421         }
2422
2423         DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
2424
2425         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2426
2427         /* now send a tx doorbell, counting the next BD
2428          * if the packet contains or ends with it
2429          */
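             /* The last descriptor of each BD page is a "next page" link BD; if
              * the chain wrapped past it, that slot is consumed as well and must
              * be counted in nbd before the producers are advanced below.
              */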
2430         if (TX_BD_POFF(bd_prod) < nbd)
2431                 nbd++;
2432
2433         if (total_pkt_bd != NULL)
2434                 total_pkt_bd->total_pkt_bytes = pkt_size;
2435
2436         if (pbd_e1x)
2437                 DP(NETIF_MSG_TX_QUEUED,
2438                    "PBD (E1X) @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u"
2439                    "  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
2440                    pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
2441                    pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
2442                    pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
2443                     le16_to_cpu(pbd_e1x->total_hlen_w));
2444         if (pbd_e2)
2445                 DP(NETIF_MSG_TX_QUEUED,
2446                    "PBD (E2) @%p  dst %x %x %x src %x %x %x parsing_data %x\n",
2447                    pbd_e2, pbd_e2->dst_mac_addr_hi, pbd_e2->dst_mac_addr_mid,
2448                    pbd_e2->dst_mac_addr_lo, pbd_e2->src_mac_addr_hi,
2449                    pbd_e2->src_mac_addr_mid, pbd_e2->src_mac_addr_lo,
2450                    pbd_e2->parsing_data);
2451         DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);
2452
2453         /*
2454          * Make sure that the BD data is updated before updating the producer
2455          * since FW might read the BD right after the producer is updated.
2456          * This is only applicable for weak-ordered memory model archs such
2457          * as IA-64. The following barrier is also mandatory since the FW
2458          * assumes packets must have BDs.
2459          */
2460         wmb();
2461
2462         fp->tx_db.data.prod += nbd;
2463         barrier();
2464
2465         DOORBELL(bp, fp->cid, fp->tx_db.raw);
2466
2467         mmiowb();
2468
2469         fp->tx_bd_prod += nbd;
2470
2471         if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
2472                 netif_tx_stop_queue(txq);
2473
2474                 /* The paired memory barrier is in bnx2x_tx_int(); we must
2475                  * keep the ordering of set_bit() in netif_tx_stop_queue()
2476                  * and the read of fp->tx_bd_cons */
2477                 smp_mb();
2478
2479                 fp->eth_q_stats.driver_xoff++;
2480                 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
2481                         netif_tx_wake_queue(txq);
2482         }
2483         fp->tx_pkt++;
2484
2485         return NETDEV_TX_OK;
2486 }
2487
2488 /* called with rtnl_lock */
2489 int bnx2x_change_mac_addr(struct net_device *dev, void *p)
2490 {
2491         struct sockaddr *addr = p;
2492         struct bnx2x *bp = netdev_priv(dev);
2493
2494         if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
2495                 return -EINVAL;
2496
2497         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2498         if (netif_running(dev))
2499                 bnx2x_set_eth_mac(bp, 1);
2500
2501         return 0;
2502 }
2503
2504 static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
2505 {
2506         union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
2507         struct bnx2x_fastpath *fp = &bp->fp[fp_index];
2508
2509         /* Common */
2510 #ifdef BCM_CNIC
2511         if (IS_FCOE_IDX(fp_index)) {
2512                 memset(sb, 0, sizeof(union host_hc_status_block));
2513                 fp->status_blk_mapping = 0;
2514
2515         } else {
2516 #endif
2517                 /* status blocks */
2518                 if (CHIP_IS_E2(bp))
2519                         BNX2X_PCI_FREE(sb->e2_sb,
2520                                        bnx2x_fp(bp, fp_index,
2521                                                 status_blk_mapping),
2522                                        sizeof(struct host_hc_status_block_e2));
2523                 else
2524                         BNX2X_PCI_FREE(sb->e1x_sb,
2525                                        bnx2x_fp(bp, fp_index,
2526                                                 status_blk_mapping),
2527                                        sizeof(struct host_hc_status_block_e1x));
2528 #ifdef BCM_CNIC
2529         }
2530 #endif
2531         /* Rx */
2532         if (!skip_rx_queue(bp, fp_index)) {
2533                 bnx2x_free_rx_bds(fp);
2534
2535                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
2536                 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
2537                 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
2538                                bnx2x_fp(bp, fp_index, rx_desc_mapping),
2539                                sizeof(struct eth_rx_bd) * NUM_RX_BD);
2540
2541                 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
2542                                bnx2x_fp(bp, fp_index, rx_comp_mapping),
2543                                sizeof(struct eth_fast_path_rx_cqe) *
2544                                NUM_RCQ_BD);
2545
2546                 /* SGE ring */
2547                 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
2548                 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
2549                                bnx2x_fp(bp, fp_index, rx_sge_mapping),
2550                                BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
2551         }
2552
2553         /* Tx */
2554         if (!skip_tx_queue(bp, fp_index)) {
2555                 /* fastpath tx rings: tx_buf tx_desc */
2556                 BNX2X_FREE(bnx2x_fp(bp, fp_index, tx_buf_ring));
2557                 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, tx_desc_ring),
2558                                bnx2x_fp(bp, fp_index, tx_desc_mapping),
2559                                sizeof(union eth_tx_bd_types) * NUM_TX_BD);
2560         }
2561         /* end of fastpath */
2562 }
2563
2564 void bnx2x_free_fp_mem(struct bnx2x *bp)
2565 {
2566         int i;
2567         for_each_queue(bp, i)
2568                 bnx2x_free_fp_mem_at(bp, i);
2569 }
2570
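     /* Cache pointers to the status block index arrays in the fastpath
      * structure so that the hot path does not need to know about the
      * chip-specific (E1x vs. E2) status block layout.
      */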
2571 static inline void set_sb_shortcuts(struct bnx2x *bp, int index)
2572 {
2573         union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
2574         if (CHIP_IS_E2(bp)) {
2575                 bnx2x_fp(bp, index, sb_index_values) =
2576                         (__le16 *)status_blk.e2_sb->sb.index_values;
2577                 bnx2x_fp(bp, index, sb_running_index) =
2578                         (__le16 *)status_blk.e2_sb->sb.running_index;
2579         } else {
2580                 bnx2x_fp(bp, index, sb_index_values) =
2581                         (__le16 *)status_blk.e1x_sb->sb.index_values;
2582                 bnx2x_fp(bp, index, sb_running_index) =
2583                         (__le16 *)status_blk.e1x_sb->sb.running_index;
2584         }
2585 }
2586
2587 static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
2588 {
2589         union host_hc_status_block *sb;
2590         struct bnx2x_fastpath *fp = &bp->fp[index];
2591         int ring_size = 0;
2592
2593         /* if rx_ring_size specified - use it */
2594         int rx_ring_size = bp->rx_ring_size ? bp->rx_ring_size :
2595                            MAX_RX_AVAIL/bp->num_queues;
2596
2597         /* allocate at least the number of buffers required by the FW */
2598         rx_ring_size = max_t(int, fp->disable_tpa ? MIN_RX_SIZE_NONTPA :
2599                                                     MIN_RX_SIZE_TPA,
2600                                   rx_ring_size);
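             /* rx_ring_size is thus the user-requested size if one was set
              * (e.g. via ethtool -G), otherwise an even share of MAX_RX_AVAIL,
              * and never less than the FW-mandated minimum for this queue type.
              */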
2601
2602         bnx2x_fp(bp, index, bp) = bp;
2603         bnx2x_fp(bp, index, index) = index;
2604
2605         /* Common */
2606         sb = &bnx2x_fp(bp, index, status_blk);
2607 #ifdef BCM_CNIC
2608         if (!IS_FCOE_IDX(index)) {
2609 #endif
2610                 /* status blocks */
2611                 if (CHIP_IS_E2(bp))
2612                         BNX2X_PCI_ALLOC(sb->e2_sb,
2613                                 &bnx2x_fp(bp, index, status_blk_mapping),
2614                                 sizeof(struct host_hc_status_block_e2));
2615                 else
2616                         BNX2X_PCI_ALLOC(sb->e1x_sb,
2617                                 &bnx2x_fp(bp, index, status_blk_mapping),
2618                             sizeof(struct host_hc_status_block_e1x));
2619 #ifdef BCM_CNIC
2620         }
2621 #endif
2622         set_sb_shortcuts(bp, index);
2623
2624         /* Tx */
2625         if (!skip_tx_queue(bp, index)) {
2626                 /* fastpath tx rings: tx_buf tx_desc */
2627                 BNX2X_ALLOC(bnx2x_fp(bp, index, tx_buf_ring),
2628                                 sizeof(struct sw_tx_bd) * NUM_TX_BD);
2629                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, tx_desc_ring),
2630                                 &bnx2x_fp(bp, index, tx_desc_mapping),
2631                                 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
2632         }
2633
2634         /* Rx */
2635         if (!skip_rx_queue(bp, index)) {
2636                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
2637                 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_buf_ring),
2638                                 sizeof(struct sw_rx_bd) * NUM_RX_BD);
2639                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_desc_ring),
2640                                 &bnx2x_fp(bp, index, rx_desc_mapping),
2641                                 sizeof(struct eth_rx_bd) * NUM_RX_BD);
2642
2643                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_comp_ring),
2644                                 &bnx2x_fp(bp, index, rx_comp_mapping),
2645                                 sizeof(struct eth_fast_path_rx_cqe) *
2646                                 NUM_RCQ_BD);
2647
2648                 /* SGE ring */
2649                 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring),
2650                                 sizeof(struct sw_rx_page) * NUM_RX_SGE);
2651                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_sge_ring),
2652                                 &bnx2x_fp(bp, index, rx_sge_mapping),
2653                                 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
2654                 /* RX BD ring */
2655                 bnx2x_set_next_page_rx_bd(fp);
2656
2657                 /* CQ ring */
2658                 bnx2x_set_next_page_rx_cq(fp);
2659
2660                 /* BDs */
2661                 ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
2662                 if (ring_size < rx_ring_size)
2663                         goto alloc_mem_err;
2664         }
2665
2666         return 0;
2667
2668 /* handles low memory cases */
2669 alloc_mem_err:
2670         BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
2671                                                 index, ring_size);
2672         /* FW will drop all packets if the queue is not big enough,
2673          * so in that case we disable the queue.
2674          * The minimum size differs for TPA and non-TPA queues.
2675          */
2676         if (ring_size < (fp->disable_tpa ?
2677                                 MIN_RX_SIZE_TPA : MIN_RX_SIZE_NONTPA)) {
2678                         /* release memory allocated for this queue */
2679                         bnx2x_free_fp_mem_at(bp, index);
2680                         return -ENOMEM;
2681         }
2682         return 0;
2683 }
2684
2685 int bnx2x_alloc_fp_mem(struct bnx2x *bp)
2686 {
2687         int i;
2688
2689         /*
2690          * 1. Allocate FP for leading - fatal if error
2691          * 2. {CNIC} Allocate FCoE FP - fatal if error
2692          * 3. Allocate RSS - fix number of queues if error
2693          */
2694
2695         /* leading */
2696         if (bnx2x_alloc_fp_mem_at(bp, 0))
2697                 return -ENOMEM;
2698 #ifdef BCM_CNIC
2699         /* FCoE */
2700         if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX))
2701                 return -ENOMEM;
2702 #endif
2703         /* RSS */
2704         for_each_nondefault_eth_queue(bp, i)
2705                 if (bnx2x_alloc_fp_mem_at(bp, i))
2706                         break;
2707
2708         /* handle memory failures */
2709         if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
2710                 int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
2711
2712                 WARN_ON(delta < 0);
2713 #ifdef BCM_CNIC
2714                 /*
2715                  * Move the non-eth FPs so that they follow the last eth FP.
2716                  * This must be done in this order:
2717                  * FCOE_IDX < FWD_IDX < OOO_IDX
2718                  */
2719
2720                 /* move FCoE fp */
2721                 bnx2x_move_fp(bp, FCOE_IDX, FCOE_IDX - delta);
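                     /* e.g. if two RSS queues failed to allocate (delta == 2),
                      * the FCoE fastpath slides down two slots so that it still
                      * immediately follows the last successfully allocated eth
                      * queue.
                      */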
2722 #endif
2723                 bp->num_queues -= delta;
2724                 BNX2X_ERR("Adjusted num of queues from %d to %d\n",
2725                           bp->num_queues + delta, bp->num_queues);
2726         }
2727
2728         return 0;
2729 }
2730
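     /* Request IRQs according to the interrupt mode negotiated earlier:
      * one vector per status block for MSI-X, or a single vector shared by
      * all fastpaths for MSI or legacy INTx.
      */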
2731 static int bnx2x_setup_irqs(struct bnx2x *bp)
2732 {
2733         int rc = 0;
2734         if (bp->flags & USING_MSIX_FLAG) {
2735                 rc = bnx2x_req_msix_irqs(bp);
2736                 if (rc)
2737                         return rc;
2738         } else {
2739                 bnx2x_ack_int(bp);
2740                 rc = bnx2x_req_irq(bp);
2741                 if (rc) {
2742                         BNX2X_ERR("IRQ request failed  rc %d, aborting\n", rc);
2743                         return rc;
2744                 }
2745                 if (bp->flags & USING_MSI_FLAG) {
2746                         bp->dev->irq = bp->pdev->irq;
2747                         netdev_info(bp->dev, "using MSI  IRQ %d\n",
2748                                bp->pdev->irq);
2749                 }
2750         }
2751
2752         return 0;
2753 }
2754
2755 void bnx2x_free_mem_bp(struct bnx2x *bp)
2756 {
2757         kfree(bp->fp);
2758         kfree(bp->msix_table);
2759         kfree(bp->ilt);
2760 }
2761
2762 int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
2763 {
2764         struct bnx2x_fastpath *fp;
2765         struct msix_entry *tbl;
2766         struct bnx2x_ilt *ilt;
2767
2768         /* fp array */
2769         fp = kzalloc(L2_FP_COUNT(bp->l2_cid_count)*sizeof(*fp), GFP_KERNEL);
2770         if (!fp)
2771                 goto alloc_err;
2772         bp->fp = fp;
2773
2774         /* msix table */
2775         tbl = kzalloc((FP_SB_COUNT(bp->l2_cid_count) + 1) * sizeof(*tbl),
2776                                   GFP_KERNEL);
2777         if (!tbl)
2778                 goto alloc_err;
2779         bp->msix_table = tbl;
2780
2781         /* ilt */
2782         ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
2783         if (!ilt)
2784                 goto alloc_err;
2785         bp->ilt = ilt;
2786
2787         return 0;
2788 alloc_err:
2789         bnx2x_free_mem_bp(bp);
2790         return -ENOMEM;
2791
2792 }
2793
2794 static int bnx2x_reload_if_running(struct net_device *dev)
2795 {
2796         struct bnx2x *bp = netdev_priv(dev);
2797
2798         if (unlikely(!netif_running(dev)))
2799                 return 0;
2800
2801         bnx2x_nic_unload(bp, UNLOAD_NORMAL);
2802         return bnx2x_nic_load(bp, LOAD_NORMAL);
2803 }
2804
2805 /* called with rtnl_lock */
2806 int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
2807 {
2808         struct bnx2x *bp = netdev_priv(dev);
2809
2810         if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
2811                 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
2812                 return -EAGAIN;
2813         }
2814
2815         if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
2816             ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
2817                 return -EINVAL;
2818
2819         /* This does not race with packet allocation
2820          * because the actual alloc size is
2821          * only updated as part of load
2822          */
2823         dev->mtu = new_mtu;
2824
2825         return bnx2x_reload_if_running(dev);
2826 }
2827
2828 u32 bnx2x_fix_features(struct net_device *dev, u32 features)
2829 {
2830         struct bnx2x *bp = netdev_priv(dev);
2831
2832         /* TPA requires Rx CSUM offloading */
2833         if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa)
2834                 features &= ~NETIF_F_LRO;
2835
2836         return features;
2837 }
2838
2839 int bnx2x_set_features(struct net_device *dev, u32 features)
2840 {
2841         struct bnx2x *bp = netdev_priv(dev);
2842         u32 flags = bp->flags;
2843         bool bnx2x_reload = false;
2844
2845         if (features & NETIF_F_LRO)
2846                 flags |= TPA_ENABLE_FLAG;
2847         else
2848                 flags &= ~TPA_ENABLE_FLAG;
2849
2850         if (features & NETIF_F_LOOPBACK) {
2851                 if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
2852                         bp->link_params.loopback_mode = LOOPBACK_BMAC;
2853                         bnx2x_reload = true;
2854                 }
2855         } else {
2856                 if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
2857                         bp->link_params.loopback_mode = LOOPBACK_NONE;
2858                         bnx2x_reload = true;
2859                 }
2860         }
2861
2862         if (flags ^ bp->flags) {
2863                 bp->flags = flags;
2864                 bnx2x_reload = true;
2865         }
2866
2867         if (bnx2x_reload) {
2868                 if (bp->recovery_state == BNX2X_RECOVERY_DONE)
2869                         return bnx2x_reload_if_running(dev);
2870                 /* else: bnx2x_nic_load() will be called at end of recovery */
2871         }
2872
2873         return 0;
2874 }
2875
2876 void bnx2x_tx_timeout(struct net_device *dev)
2877 {
2878         struct bnx2x *bp = netdev_priv(dev);
2879
2880 #ifdef BNX2X_STOP_ON_ERROR
2881         if (!bp->panic)
2882                 bnx2x_panic();
2883 #endif
2884         /* This allows the netif to be shut down gracefully before resetting */
2885         schedule_delayed_work(&bp->reset_task, 0);
2886 }
2887
2888 int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
2889 {
2890         struct net_device *dev = pci_get_drvdata(pdev);
2891         struct bnx2x *bp;
2892
2893         if (!dev) {
2894                 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
2895                 return -ENODEV;
2896         }
2897         bp = netdev_priv(dev);
2898
2899         rtnl_lock();
2900
2901         pci_save_state(pdev);
2902
2903         if (!netif_running(dev)) {
2904                 rtnl_unlock();
2905                 return 0;
2906         }
2907
2908         netif_device_detach(dev);
2909
2910         bnx2x_nic_unload(bp, UNLOAD_CLOSE);
2911
2912         bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
2913
2914         rtnl_unlock();
2915
2916         return 0;
2917 }
2918
2919 int bnx2x_resume(struct pci_dev *pdev)
2920 {
2921         struct net_device *dev = pci_get_drvdata(pdev);
2922         struct bnx2x *bp;
2923         int rc;
2924
2925         if (!dev) {
2926                 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
2927                 return -ENODEV;
2928         }
2929         bp = netdev_priv(dev);
2930
2931         if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
2932                 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
2933                 return -EAGAIN;
2934         }
2935
2936         rtnl_lock();
2937
2938         pci_restore_state(pdev);
2939
2940         if (!netif_running(dev)) {
2941                 rtnl_unlock();
2942                 return 0;
2943         }
2944
2945         bnx2x_set_power_state(bp, PCI_D0);
2946         netif_device_attach(dev);
2947
2948         /* Since the chip was reset, clear the FW sequence number */
2949         bp->fw_seq = 0;
2950         rc = bnx2x_nic_load(bp, LOAD_OPEN);
2951
2952         rtnl_unlock();
2953
2954         return rc;
2955 }