1 /* bnx2x_cmn.c: Broadcom Everest network driver.
2  *
3  * Copyright (c) 2007-2011 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10  * Written by: Eliezer Tamir
11  * Based on code from Michael Chan's bnx2 driver
12  * UDP CSUM errata workaround by Arik Gendelman
13  * Slowpath and fastpath rework by Vladislav Zolotarov
14  * Statistics and Link management by Yitchak Gertner
15  *
16  */
17
18 #include <linux/etherdevice.h>
19 #include <linux/if_vlan.h>
20 #include <linux/ip.h>
21 #include <net/ipv6.h>
22 #include <net/ip6_checksum.h>
23 #include <linux/firmware.h>
24 #include <linux/prefetch.h>
25 #include "bnx2x_cmn.h"
26
27 #include "bnx2x_init.h"
28
29 static int bnx2x_setup_irqs(struct bnx2x *bp);
30
31 /**
32  * bnx2x_bz_fp - zero content of the fastpath structure.
33  *
34  * @bp:         driver handle
35  * @index:      fastpath index to be zeroed
36  *
37  * Makes sure the contents of bp->fp[index].napi are kept
38  * intact.
39  */
40 static inline void bnx2x_bz_fp(struct bnx2x *bp, int index)
41 {
42         struct bnx2x_fastpath *fp = &bp->fp[index];
43         struct napi_struct orig_napi = fp->napi;
44         /* bzero bnx2x_fastpath contents */
45         memset(fp, 0, sizeof(*fp));
46
47         /* Restore the NAPI object as it has been already initialized */
48         fp->napi = orig_napi;
49 }
50
51 /**
52  * bnx2x_move_fp - move content of the fastpath structure.
53  *
54  * @bp:         driver handle
55  * @from:       source FP index
56  * @to:         destination FP index
57  *
58  * Makes sure the contents of bp->fp[to].napi are kept
59  * intact.
60  */
61 static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
62 {
63         struct bnx2x_fastpath *from_fp = &bp->fp[from];
64         struct bnx2x_fastpath *to_fp = &bp->fp[to];
65         struct napi_struct orig_napi = to_fp->napi;
66         /* Move bnx2x_fastpath contents */
67         memcpy(to_fp, from_fp, sizeof(*to_fp));
68         to_fp->index = to;
69
70         /* Restore the NAPI object as it has been already initialized */
71         to_fp->napi = orig_napi;
72 }
73
74 /* free skb in the packet ring at pos idx
75  * return idx of last bd freed
76  */
77 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
78                              u16 idx)
79 {
80         struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
81         struct eth_tx_start_bd *tx_start_bd;
82         struct eth_tx_bd *tx_data_bd;
83         struct sk_buff *skb = tx_buf->skb;
84         u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
85         int nbd;
86
87         /* prefetch skb end pointer to speed up dev_kfree_skb() */
88         prefetch(&skb->end);
89
90         DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
91            idx, tx_buf, skb);
92
93         /* unmap first bd */
94         DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
95         tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
96         dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
97                          BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);
98
99         nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
100 #ifdef BNX2X_STOP_ON_ERROR
101         if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
102                 BNX2X_ERR("BAD nbd!\n");
103                 bnx2x_panic();
104         }
105 #endif
106         new_cons = nbd + tx_buf->first_bd;
107
108         /* Get the next bd */
109         bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
110
111         /* Skip a parse bd... */
112         --nbd;
113         bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
114
115         /* ...and the TSO split header bd since they have no mapping */
116         if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
117                 --nbd;
118                 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
119         }
120
121         /* now free frags */
122         while (nbd > 0) {
123
124                 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
125                 tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
126                 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
127                                BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
128                 if (--nbd)
129                         bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
130         }
131
132         /* release skb */
133         WARN_ON(!skb);
134         dev_kfree_skb_any(skb);
135         tx_buf->first_bd = 0;
136         tx_buf->skb = NULL;
137
138         return new_cons;
139 }
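/* A note on the BD accounting above, as read from the code: only the
 * start BD and the data BDs carry DMA mappings, which is why the parse
 * BD and the optional TSO split-header BD are merely skipped.  The
 * returned new_cons, computed before the frags are walked, lets
 * bnx2x_tx_int() advance fp->tx_bd_cons past every BD of the packet,
 * mapped or not.
 */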
140
141 int bnx2x_tx_int(struct bnx2x_fastpath *fp)
142 {
143         struct bnx2x *bp = fp->bp;
144         struct netdev_queue *txq;
145         u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
146
147 #ifdef BNX2X_STOP_ON_ERROR
148         if (unlikely(bp->panic))
149                 return -1;
150 #endif
151
152         txq = netdev_get_tx_queue(bp->dev, fp->index);
153         hw_cons = le16_to_cpu(*fp->tx_cons_sb);
154         sw_cons = fp->tx_pkt_cons;
155
156         while (sw_cons != hw_cons) {
157                 u16 pkt_cons;
158
159                 pkt_cons = TX_BD(sw_cons);
160
161                 DP(NETIF_MSG_TX_DONE, "queue[%d]: hw_cons %u  sw_cons %u "
162                                       " pkt_cons %u\n",
163                    fp->index, hw_cons, sw_cons, pkt_cons);
164
165                 bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
166                 sw_cons++;
167         }
168
169         fp->tx_pkt_cons = sw_cons;
170         fp->tx_bd_cons = bd_cons;
171
172         /* Need to make the tx_bd_cons update visible to start_xmit()
173          * before checking for netif_tx_queue_stopped().  Without the
174          * memory barrier, there is a small possibility that
175          * start_xmit() will miss it and cause the queue to be stopped
176          * forever.
177          */
178         smp_mb();
179
180         if (unlikely(netif_tx_queue_stopped(txq))) {
181                 /* Taking tx_lock() is needed to prevent re-enabling the queue
182                  * while it's empty. This could have happened if rx_action() gets
183                  * suspended in bnx2x_tx_int() after the condition before
184                  * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
185                  *
186                  * stops the queue->sees fresh tx_bd_cons->releases the queue->
187                  * sends some packets consuming the whole queue again->
188                  * stops the queue
189                  */
190
191                 __netif_tx_lock(txq, smp_processor_id());
192
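                /* One plausible reading of the MAX_SKB_FRAGS + 3 threshold
                 * below (not spelled out here): it leaves room for a
                 * worst-case packet made of a start BD, a parse BD, a
                 * possible TSO split-header BD and up to MAX_SKB_FRAGS
                 * data BDs.
                 */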
193                 if ((netif_tx_queue_stopped(txq)) &&
194                     (bp->state == BNX2X_STATE_OPEN) &&
195                     (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
196                         netif_tx_wake_queue(txq);
197
198                 __netif_tx_unlock(txq);
199         }
200         return 0;
201 }
202
203 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
204                                              u16 idx)
205 {
206         u16 last_max = fp->last_max_sge;
207
208         if (SUB_S16(idx, last_max) > 0)
209                 fp->last_max_sge = idx;
210 }
211
212 static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
213                                   struct eth_fast_path_rx_cqe *fp_cqe)
214 {
215         struct bnx2x *bp = fp->bp;
216         u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
217                                      le16_to_cpu(fp_cqe->len_on_bd)) >>
218                       SGE_PAGE_SHIFT;
219         u16 last_max, last_elem, first_elem;
220         u16 delta = 0;
221         u16 i;
222
223         if (!sge_len)
224                 return;
225
226         /* First mark all used pages */
227         for (i = 0; i < sge_len; i++)
228                 SGE_MASK_CLEAR_BIT(fp,
229                         RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[i])));
230
231         DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
232            sge_len - 1, le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));
233
234         /* Here we assume that the last SGE index is the biggest */
235         prefetch((void *)(fp->sge_mask));
236         bnx2x_update_last_max_sge(fp,
237                 le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));
238
239         last_max = RX_SGE(fp->last_max_sge);
240         last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
241         first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;
242
243         /* If ring is not full */
244         if (last_elem + 1 != first_elem)
245                 last_elem++;
246
247         /* Now update the prod */
248         for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
249                 if (likely(fp->sge_mask[i]))
250                         break;
251
252                 fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
253                 delta += RX_SGE_MASK_ELEM_SZ;
254         }
255
256         if (delta > 0) {
257                 fp->rx_sge_prod += delta;
258                 /* clear page-end entries */
259                 bnx2x_clear_sge_mask_next_elems(fp);
260         }
261
262         DP(NETIF_MSG_RX_STATUS,
263            "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
264            fp->last_max_sge, fp->rx_sge_prod);
265 }
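/* How the SGE bookkeeping above works, as read from the code: every SGE
 * consumed by this CQE has its bit cleared in fp->sge_mask, and
 * rx_sge_prod is advanced only across mask elements that became all-zero
 * (i.e. fully consumed).  Those elements are then re-armed to
 * RX_SGE_MASK_ELEM_ONE_MASK so their pages can be reposted to the HW.
 */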
266
267 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
268                             struct sk_buff *skb, u16 cons, u16 prod)
269 {
270         struct bnx2x *bp = fp->bp;
271         struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
272         struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
273         struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
274         dma_addr_t mapping;
275
276         /* move empty skb from pool to prod and map it */
277         prod_rx_buf->skb = fp->tpa_pool[queue].skb;
278         mapping = dma_map_single(&bp->pdev->dev, fp->tpa_pool[queue].skb->data,
279                                  fp->rx_buf_size, DMA_FROM_DEVICE);
280         dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
281
282         /* move partial skb from cons to pool (don't unmap yet) */
283         fp->tpa_pool[queue] = *cons_rx_buf;
284
285         /* mark bin state as start - print error if current state != stop */
286         if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
287                 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
288
289         fp->tpa_state[queue] = BNX2X_TPA_START;
290
291         /* point prod_bd to new skb */
292         prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
293         prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
294
295 #ifdef BNX2X_STOP_ON_ERROR
296         fp->tpa_queue_used |= (1 << queue);
297 #ifdef _ASM_GENERIC_INT_L64_H
298         DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
299 #else
300         DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
301 #endif
302            fp->tpa_queue_used);
303 #endif
304 }
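/* In short: the spare skb parked in tpa_pool[queue] is mapped and placed
 * in the producer slot, while the skb that just received the first
 * segment of the aggregation moves into the pool (still mapped) until
 * the matching TPA_STOP CQE arrives.
 */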
305
306 /* Timestamp option length allowed for TPA aggregation:
307  *
308  *              nop nop kind length echo val
309  */
310 #define TPA_TSTAMP_OPT_LEN      12
311 /**
312  * bnx2x_set_lro_mss - calculate the approximate value of the MSS
313  *
314  * @bp:                 driver handle
315  * @parsing_flags:      parsing flags from the START CQE
316  * @len_on_bd:          total length of the first packet for the
317  *                      aggregation.
318  *
319  * Approximate value of the MSS for this aggregation, calculated using
320  * its first packet.
321  */
322 static inline u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags,
323                                     u16 len_on_bd)
324 {
325         /* TPA aggregation won't have IP options or TCP options
326          * other than timestamp.
327          */
328         u16 hdrs_len = ETH_HLEN + sizeof(struct iphdr) + sizeof(struct tcphdr);
329
330
331         /* Check if there was a TCP timestamp; if there was, it will
332          * always be 12 bytes long: nop nop kind length echo val.
333          *
334          * Otherwise FW would close the aggregation.
335          */
336         if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
337                 hdrs_len += TPA_TSTAMP_OPT_LEN;
338
339         return len_on_bd - hdrs_len;
340 }
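/* Worked example for the calculation above: ETH_HLEN (14) plus a bare
 * IPv4 header (20) and a bare TCP header (20) give hdrs_len = 54, or 66
 * when the 12-byte timestamp option is present, so the estimated MSS is
 * len_on_bd - 54 (or len_on_bd - 66).
 */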
341
342 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
343                                struct sk_buff *skb,
344                                struct eth_fast_path_rx_cqe *fp_cqe,
345                                u16 cqe_idx, u16 parsing_flags)
346 {
347         struct sw_rx_page *rx_pg, old_rx_pg;
348         u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
349         u32 i, frag_len, frag_size, pages;
350         int err;
351         int j;
352
353         frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
354         pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
355
356         /* This is needed in order to enable forwarding support */
357         if (frag_size)
358                 skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp, parsing_flags,
359                                                               len_on_bd);
360
361 #ifdef BNX2X_STOP_ON_ERROR
362         if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
363                 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
364                           pages, cqe_idx);
365                 BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
366                           fp_cqe->pkt_len, len_on_bd);
367                 bnx2x_panic();
368                 return -EINVAL;
369         }
370 #endif
371
372         /* Run through the SGL and compose the fragmented skb */
373         for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
374                 u16 sge_idx =
375                         RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[j]));
376
377                 /* FW gives the indices of the SGE as if the ring is an array
378                    (meaning that "next" element will consume 2 indices) */
379                 frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
380                 rx_pg = &fp->rx_page_ring[sge_idx];
381                 old_rx_pg = *rx_pg;
382
383                 /* If we fail to allocate a substitute page, we simply stop
384                    where we are and drop the whole packet */
385                 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
386                 if (unlikely(err)) {
387                         fp->eth_q_stats.rx_skb_alloc_failed++;
388                         return err;
389                 }
390
391                 /* Unmap the page as we are going to pass it to the stack */
392                 dma_unmap_page(&bp->pdev->dev,
393                                dma_unmap_addr(&old_rx_pg, mapping),
394                                SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
395
396                 /* Add one frag and update the appropriate fields in the skb */
397                 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
398
399                 skb->data_len += frag_len;
400                 skb->truesize += frag_len;
401                 skb->len += frag_len;
402
403                 frag_size -= frag_len;
404         }
405
406         return 0;
407 }
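/* Note: j walks the CQE's sgl[] entries while i counts pages, since one
 * SGE entry covers PAGES_PER_SGE pages; frag_len is therefore capped at
 * SGE_PAGE_SIZE * PAGES_PER_SGE per iteration and the remainder is
 * carried over in frag_size.
 */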
408
409 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
410                            u16 queue, int pad, int len, union eth_rx_cqe *cqe,
411                            u16 cqe_idx)
412 {
413         struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
414         struct sk_buff *skb = rx_buf->skb;
415         /* alloc new skb */
416         struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, fp->rx_buf_size);
417
418         /* Unmap skb in the pool anyway, as we are going to change
419            pool entry status to BNX2X_TPA_STOP even if new skb allocation
420            fails. */
421         dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
422                          fp->rx_buf_size, DMA_FROM_DEVICE);
423
424         if (likely(new_skb)) {
425                 /* fix ip xsum and give it to the stack */
426                 /* (no need to map the new skb) */
427                 u16 parsing_flags =
428                         le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags);
429
430                 prefetch(skb);
431                 prefetch(((char *)(skb)) + L1_CACHE_BYTES);
432
433 #ifdef BNX2X_STOP_ON_ERROR
434                 if (pad + len > fp->rx_buf_size) {
435                         BNX2X_ERR("skb_put is about to fail...  "
436                                   "pad %d  len %d  rx_buf_size %d\n",
437                                   pad, len, fp->rx_buf_size);
438                         bnx2x_panic();
439                         return;
440                 }
441 #endif
442
443                 skb_reserve(skb, pad);
444                 skb_put(skb, len);
445
446                 skb->protocol = eth_type_trans(skb, bp->dev);
447                 skb->ip_summed = CHECKSUM_UNNECESSARY;
448
449                 {
450                         struct iphdr *iph;
451
452                         iph = (struct iphdr *)skb->data;
453                         iph->check = 0;
454                         iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
455                 }
456
457                 if (!bnx2x_fill_frag_skb(bp, fp, skb,
458                                          &cqe->fast_path_cqe, cqe_idx,
459                                          parsing_flags)) {
460                         if (parsing_flags & PARSING_FLAGS_VLAN)
461                                 __vlan_hwaccel_put_tag(skb,
462                                                  le16_to_cpu(cqe->fast_path_cqe.
463                                                              vlan_tag));
464                         napi_gro_receive(&fp->napi, skb);
465                 } else {
466                         DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
467                            " - dropping packet!\n");
468                         dev_kfree_skb_any(skb);
469                 }
470
471
472                 /* put new skb in bin */
473                 fp->tpa_pool[queue].skb = new_skb;
474
475         } else {
476                 /* else drop the packet and keep the buffer in the bin */
477                 DP(NETIF_MSG_RX_STATUS,
478                    "Failed to allocate new skb - dropping packet!\n");
479                 fp->eth_q_stats.rx_skb_alloc_failed++;
480         }
481
482         fp->tpa_state[queue] = BNX2X_TPA_STOP;
483 }
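/* Outcome summary: on success the aggregated skb (its linear part plus
 * the page frags added by bnx2x_fill_frag_skb()) is handed to GRO and
 * the freshly allocated skb takes its place in the bin; if the
 * replacement skb could not be allocated, the packet is dropped and the
 * old skb is kept in the bin instead.
 */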
484
485 /* Set Toeplitz hash value in the skb using the value from the
486  * CQE (calculated by HW).
487  */
488 static inline void bnx2x_set_skb_rxhash(struct bnx2x *bp, union eth_rx_cqe *cqe,
489                                         struct sk_buff *skb)
490 {
491         /* Set Toeplitz hash from CQE */
492         if ((bp->dev->features & NETIF_F_RXHASH) &&
493             (cqe->fast_path_cqe.status_flags &
494              ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
495                 skb->rxhash =
496                 le32_to_cpu(cqe->fast_path_cqe.rss_hash_result);
497 }
498
499 int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
500 {
501         struct bnx2x *bp = fp->bp;
502         u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
503         u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
504         int rx_pkt = 0;
505
506 #ifdef BNX2X_STOP_ON_ERROR
507         if (unlikely(bp->panic))
508                 return 0;
509 #endif
510
511         /* The CQ "next element" has the same size as a regular element,
512            that's why it's ok here */
513         hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
514         if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
515                 hw_comp_cons++;
516
517         bd_cons = fp->rx_bd_cons;
518         bd_prod = fp->rx_bd_prod;
519         bd_prod_fw = bd_prod;
520         sw_comp_cons = fp->rx_comp_cons;
521         sw_comp_prod = fp->rx_comp_prod;
522
523         /* Memory barrier necessary as speculative reads of the rx
524          * buffer can be ahead of the index in the status block
525          */
526         rmb();
527
528         DP(NETIF_MSG_RX_STATUS,
529            "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
530            fp->index, hw_comp_cons, sw_comp_cons);
531
532         while (sw_comp_cons != hw_comp_cons) {
533                 struct sw_rx_bd *rx_buf = NULL;
534                 struct sk_buff *skb;
535                 union eth_rx_cqe *cqe;
536                 u8 cqe_fp_flags;
537                 u16 len, pad;
538
539                 comp_ring_cons = RCQ_BD(sw_comp_cons);
540                 bd_prod = RX_BD(bd_prod);
541                 bd_cons = RX_BD(bd_cons);
542
543                 /* Prefetch the page containing the BD descriptor
544                    at the producer's index. It will be needed when a new skb
545                    is allocated */
546                 prefetch((void *)(PAGE_ALIGN((unsigned long)
547                                              (&fp->rx_desc_ring[bd_prod])) -
548                                   PAGE_SIZE + 1));
549
550                 cqe = &fp->rx_comp_ring[comp_ring_cons];
551                 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
552
553                 DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
554                    "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
555                    cqe_fp_flags, cqe->fast_path_cqe.status_flags,
556                    le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
557                    le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
558                    le16_to_cpu(cqe->fast_path_cqe.pkt_len));
559
560                 /* is this a slowpath msg? */
561                 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
562                         bnx2x_sp_event(fp, cqe);
563                         goto next_cqe;
564
565                 /* this is an rx packet */
566                 } else {
567                         rx_buf = &fp->rx_buf_ring[bd_cons];
568                         skb = rx_buf->skb;
569                         prefetch(skb);
570                         len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
571                         pad = cqe->fast_path_cqe.placement_offset;
572
573                         /* - If CQE is marked both TPA_START and TPA_END it is
574                          *   a non-TPA CQE.
575                          * - FP CQE will always have TPA_START and/or
576                          *   TPA_STOP flags set.
577                          */
578                         if ((!fp->disable_tpa) &&
579                             (TPA_TYPE(cqe_fp_flags) !=
580                                         (TPA_TYPE_START | TPA_TYPE_END))) {
581                                 u16 queue = cqe->fast_path_cqe.queue_index;
582
583                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
584                                         DP(NETIF_MSG_RX_STATUS,
585                                            "calling tpa_start on queue %d\n",
586                                            queue);
587
588                                         bnx2x_tpa_start(fp, queue, skb,
589                                                         bd_cons, bd_prod);
590
591                                         /* Set Toeplitz hash for an LRO skb */
592                                         bnx2x_set_skb_rxhash(bp, cqe, skb);
593
594                                         goto next_rx;
595                                 } else { /* TPA_STOP */
596                                         DP(NETIF_MSG_RX_STATUS,
597                                            "calling tpa_stop on queue %d\n",
598                                            queue);
599
600                                         if (!BNX2X_RX_SUM_FIX(cqe))
601                                                 BNX2X_ERR("STOP on non-TCP "
602                                                           "data\n");
603
604                                         /* This is the size of the linear data
605                                            on this skb */
606                                         len = le16_to_cpu(cqe->fast_path_cqe.
607                                                                 len_on_bd);
608                                         bnx2x_tpa_stop(bp, fp, queue, pad,
609                                                     len, cqe, comp_ring_cons);
610 #ifdef BNX2X_STOP_ON_ERROR
611                                         if (bp->panic)
612                                                 return 0;
613 #endif
614
615                                         bnx2x_update_sge_prod(fp,
616                                                         &cqe->fast_path_cqe);
617                                         goto next_cqe;
618                                 }
619                         }
620
621                         dma_sync_single_for_device(&bp->pdev->dev,
622                                         dma_unmap_addr(rx_buf, mapping),
623                                                    pad + RX_COPY_THRESH,
624                                                    DMA_FROM_DEVICE);
625                         prefetch(((char *)(skb)) + L1_CACHE_BYTES);
626
627                         /* is this an error packet? */
628                         if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
629                                 DP(NETIF_MSG_RX_ERR,
630                                    "ERROR  flags %x  rx packet %u\n",
631                                    cqe_fp_flags, sw_comp_cons);
632                                 fp->eth_q_stats.rx_err_discard_pkt++;
633                                 goto reuse_rx;
634                         }
635
636                         /* Since we don't have a jumbo ring,
637                          * copy small packets if mtu > 1500
638                          */
639                         if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
640                             (len <= RX_COPY_THRESH)) {
641                                 struct sk_buff *new_skb;
642
643                                 new_skb = netdev_alloc_skb(bp->dev,
644                                                            len + pad);
645                                 if (new_skb == NULL) {
646                                         DP(NETIF_MSG_RX_ERR,
647                                            "ERROR  packet dropped "
648                                            "because of alloc failure\n");
649                                         fp->eth_q_stats.rx_skb_alloc_failed++;
650                                         goto reuse_rx;
651                                 }
652
653                                 /* aligned copy */
654                                 skb_copy_from_linear_data_offset(skb, pad,
655                                                     new_skb->data + pad, len);
656                                 skb_reserve(new_skb, pad);
657                                 skb_put(new_skb, len);
658
659                                 bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod);
660
661                                 skb = new_skb;
662
663                         } else
664                         if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
665                                 dma_unmap_single(&bp->pdev->dev,
666                                         dma_unmap_addr(rx_buf, mapping),
667                                                  fp->rx_buf_size,
668                                                  DMA_FROM_DEVICE);
669                                 skb_reserve(skb, pad);
670                                 skb_put(skb, len);
671
672                         } else {
673                                 DP(NETIF_MSG_RX_ERR,
674                                    "ERROR  packet dropped because "
675                                    "of alloc failure\n");
676                                 fp->eth_q_stats.rx_skb_alloc_failed++;
677 reuse_rx:
678                                 bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod);
679                                 goto next_rx;
680                         }
681
682                         skb->protocol = eth_type_trans(skb, bp->dev);
683
684                         /* Set Toeplitz hash for a non-LRO skb */
685                         bnx2x_set_skb_rxhash(bp, cqe, skb);
686
687                         skb_checksum_none_assert(skb);
688
689                         if (bp->dev->features & NETIF_F_RXCSUM) {
690                                 if (likely(BNX2X_RX_CSUM_OK(cqe)))
691                                         skb->ip_summed = CHECKSUM_UNNECESSARY;
692                                 else
693                                         fp->eth_q_stats.hw_csum_err++;
694                         }
695                 }
696
697                 skb_record_rx_queue(skb, fp->index);
698
699                 if (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
700                      PARSING_FLAGS_VLAN)
701                         __vlan_hwaccel_put_tag(skb,
702                                 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
703                 napi_gro_receive(&fp->napi, skb);
704
705
706 next_rx:
707                 rx_buf->skb = NULL;
708
709                 bd_cons = NEXT_RX_IDX(bd_cons);
710                 bd_prod = NEXT_RX_IDX(bd_prod);
711                 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
712                 rx_pkt++;
713 next_cqe:
714                 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
715                 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
716
717                 if (rx_pkt == budget)
718                         break;
719         } /* while */
720
721         fp->rx_bd_cons = bd_cons;
722         fp->rx_bd_prod = bd_prod_fw;
723         fp->rx_comp_cons = sw_comp_cons;
724         fp->rx_comp_prod = sw_comp_prod;
725
726         /* Update producers */
727         bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
728                              fp->rx_sge_prod);
729
730         fp->rx_pkt += rx_pkt;
731         fp->rx_calls++;
732
733         return rx_pkt;
734 }
735
736 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
737 {
738         struct bnx2x_fastpath *fp = fp_cookie;
739         struct bnx2x *bp = fp->bp;
740
741         /* Return here if interrupt is disabled */
742         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
743                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
744                 return IRQ_HANDLED;
745         }
746
747         DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB "
748                          "[fp %d fw_sd %d igusb %d]\n",
749            fp->index, fp->fw_sb_id, fp->igu_sb_id);
750         bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
751
752 #ifdef BNX2X_STOP_ON_ERROR
753         if (unlikely(bp->panic))
754                 return IRQ_HANDLED;
755 #endif
756
757         /* Handle Rx and Tx according to MSI-X vector */
758         prefetch(fp->rx_cons_sb);
759         prefetch(fp->tx_cons_sb);
760         prefetch(&fp->sb_running_index[SM_RX_ID]);
761         napi_schedule(&bnx2x_fp(bp, fp->index, napi));
762
763         return IRQ_HANDLED;
764 }
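/* Each fastpath MSI-X vector does the minimum here: it acks the status
 * block with IGU_INT_DISABLE so no further interrupts fire for this SB,
 * and defers all Rx/Tx work to the NAPI poll routine scheduled above.
 */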
765
766 /* HW Lock for shared dual port PHYs */
767 void bnx2x_acquire_phy_lock(struct bnx2x *bp)
768 {
769         mutex_lock(&bp->port.phy_mutex);
770
771         if (bp->port.need_hw_lock)
772                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
773 }
774
775 void bnx2x_release_phy_lock(struct bnx2x *bp)
776 {
777         if (bp->port.need_hw_lock)
778                 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
779
780         mutex_unlock(&bp->port.phy_mutex);
781 }
782
783 /* calculates MF speed according to current line speed and MF configuration */
784 u16 bnx2x_get_mf_speed(struct bnx2x *bp)
785 {
786         u16 line_speed = bp->link_vars.line_speed;
787         if (IS_MF(bp)) {
788                 u16 maxCfg = bnx2x_extract_max_cfg(bp,
789                                                    bp->mf_config[BP_VN(bp)]);
790
791                 /* Calculate the current MAX line speed limit for the MF
792                  * devices
793                  */
794                 if (IS_MF_SI(bp))
795                         line_speed = (line_speed * maxCfg) / 100;
796                 else { /* SD mode */
797                         u16 vn_max_rate = maxCfg * 100;
798
799                         if (vn_max_rate < line_speed)
800                                 line_speed = vn_max_rate;
801                 }
802         }
803
804         return line_speed;
805 }
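/* Illustrative numbers (hypothetical maxCfg value): with maxCfg = 50 a
 * 10000 Mbps link is reported as 5000 Mbps in SI mode, where maxCfg acts
 * as a percentage; in SD mode maxCfg is in 100 Mbps units, so the same
 * value caps the reported speed at 5000 Mbps as well.
 */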
806
807 /**
808  * bnx2x_fill_report_data - fill link report data to report
809  *
810  * @bp:         driver handle
811  * @data:       link state to update
812  *
813  * It uses non-atomic bit operations because it is called under the mutex.
814  */
815 static inline void bnx2x_fill_report_data(struct bnx2x *bp,
816                                           struct bnx2x_link_report_data *data)
817 {
818         u16 line_speed = bnx2x_get_mf_speed(bp);
819
820         memset(data, 0, sizeof(*data));
821
822         /* Fill the report data: effective line speed */
823         data->line_speed = line_speed;
824
825         /* Link is down */
826         if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
827                 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
828                           &data->link_report_flags);
829
830         /* Full DUPLEX */
831         if (bp->link_vars.duplex == DUPLEX_FULL)
832                 __set_bit(BNX2X_LINK_REPORT_FD, &data->link_report_flags);
833
834         /* Rx Flow Control is ON */
835         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
836                 __set_bit(BNX2X_LINK_REPORT_RX_FC_ON, &data->link_report_flags);
837
838         /* Tx Flow Control is ON */
839         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
840                 __set_bit(BNX2X_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
841 }
842
843 /**
844  * bnx2x_link_report - report link status to OS.
845  *
846  * @bp:         driver handle
847  *
848  * Calls __bnx2x_link_report() under the same locking scheme
849  * as the link/PHY state management code to ensure consistent link
850  * reporting.
851  */
852
853 void bnx2x_link_report(struct bnx2x *bp)
854 {
855         bnx2x_acquire_phy_lock(bp);
856         __bnx2x_link_report(bp);
857         bnx2x_release_phy_lock(bp);
858 }
859
860 /**
861  * __bnx2x_link_report - report link status to OS.
862  *
863  * @bp:         driver handle
864  *
865  * Non-atomic implementation.
866  * Should be called under the phy_lock.
867  */
868 void __bnx2x_link_report(struct bnx2x *bp)
869 {
870         struct bnx2x_link_report_data cur_data;
871
872         /* reread mf_cfg */
873         if (!CHIP_IS_E1(bp))
874                 bnx2x_read_mf_cfg(bp);
875
876         /* Read the current link report info */
877         bnx2x_fill_report_data(bp, &cur_data);
878
879         /* Don't report link down or exactly the same link status twice */
880         if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
881             (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
882                       &bp->last_reported_link.link_report_flags) &&
883              test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
884                       &cur_data.link_report_flags)))
885                 return;
886
887         bp->link_cnt++;
888
889         /* We are going to report new link parameters now -
890          * remember the current data for the next time.
891          */
892         memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));
893
894         if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
895                      &cur_data.link_report_flags)) {
896                 netif_carrier_off(bp->dev);
897                 netdev_err(bp->dev, "NIC Link is Down\n");
898                 return;
899         } else {
900                 netif_carrier_on(bp->dev);
901                 netdev_info(bp->dev, "NIC Link is Up, ");
902                 pr_cont("%d Mbps ", cur_data.line_speed);
903
904                 if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
905                                        &cur_data.link_report_flags))
906                         pr_cont("full duplex");
907                 else
908                         pr_cont("half duplex");
909
910                 /* Handle the FC at the end so that only these flags could
911                  * possibly be set. This way we can easily check whether FC is
912                  * enabled.
913                  */
914                 if (cur_data.link_report_flags) {
915                         if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
916                                      &cur_data.link_report_flags)) {
917                                 pr_cont(", receive ");
918                                 if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
919                                      &cur_data.link_report_flags))
920                                         pr_cont("& transmit ");
921                         } else {
922                                 pr_cont(", transmit ");
923                         }
924                         pr_cont("flow control ON");
925                 }
926                 pr_cont("\n");
927         }
928 }
929
930 void bnx2x_init_rx_rings(struct bnx2x *bp)
931 {
932         int func = BP_FUNC(bp);
933         int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
934                                               ETH_MAX_AGGREGATION_QUEUES_E1H;
935         u16 ring_prod;
936         int i, j;
937
938         /* Allocate TPA resources */
939         for_each_rx_queue(bp, j) {
940                 struct bnx2x_fastpath *fp = &bp->fp[j];
941
942                 DP(NETIF_MSG_IFUP,
943                    "mtu %d  rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
944
945                 if (!fp->disable_tpa) {
946                         /* Fill the per-aggregation pool */
947                         for (i = 0; i < max_agg_queues; i++) {
948                                 fp->tpa_pool[i].skb =
949                                    netdev_alloc_skb(bp->dev, fp->rx_buf_size);
950                                 if (!fp->tpa_pool[i].skb) {
951                                         BNX2X_ERR("Failed to allocate TPA "
952                                                   "skb pool for queue[%d] - "
953                                                   "disabling TPA on this "
954                                                   "queue!\n", j);
955                                         bnx2x_free_tpa_pool(bp, fp, i);
956                                         fp->disable_tpa = 1;
957                                         break;
958                                 }
959                                 dma_unmap_addr_set((struct sw_rx_bd *)
960                                                         &bp->fp->tpa_pool[i],
961                                                    mapping, 0);
962                                 fp->tpa_state[i] = BNX2X_TPA_STOP;
963                         }
964
965                         /* "next page" elements initialization */
966                         bnx2x_set_next_page_sgl(fp);
967
968                         /* set SGEs bit mask */
969                         bnx2x_init_sge_ring_bit_mask(fp);
970
971                         /* Allocate SGEs and initialize the ring elements */
972                         for (i = 0, ring_prod = 0;
973                              i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
974
975                                 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
976                                         BNX2X_ERR("was only able to allocate "
977                                                   "%d rx sges\n", i);
978                                         BNX2X_ERR("disabling TPA for"
979                                                   " queue[%d]\n", j);
980                                         /* Cleanup already allocated elements */
981                                         bnx2x_free_rx_sge_range(bp,
982                                                                 fp, ring_prod);
983                                         bnx2x_free_tpa_pool(bp,
984                                                             fp, max_agg_queues);
985                                         fp->disable_tpa = 1;
986                                         ring_prod = 0;
987                                         break;
988                                 }
989                                 ring_prod = NEXT_SGE_IDX(ring_prod);
990                         }
991
992                         fp->rx_sge_prod = ring_prod;
993                 }
994         }
995
996         for_each_rx_queue(bp, j) {
997                 struct bnx2x_fastpath *fp = &bp->fp[j];
998
999                 fp->rx_bd_cons = 0;
1000
1001                 /* Activate BD ring */
1002                 /* Warning!
1003                  * this will generate an interrupt (to the TSTORM)
1004                  * must only be done after chip is initialized
1005                  */
1006                 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1007                                      fp->rx_sge_prod);
1008
1009                 if (j != 0)
1010                         continue;
1011
1012                 if (!CHIP_IS_E2(bp)) {
1013                         REG_WR(bp, BAR_USTRORM_INTMEM +
1014                                USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
1015                                U64_LO(fp->rx_comp_mapping));
1016                         REG_WR(bp, BAR_USTRORM_INTMEM +
1017                                USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
1018                                U64_HI(fp->rx_comp_mapping));
1019                 }
1020         }
1021 }
1022
1023 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
1024 {
1025         int i;
1026
1027         for_each_tx_queue(bp, i) {
1028                 struct bnx2x_fastpath *fp = &bp->fp[i];
1029
1030                 u16 bd_cons = fp->tx_bd_cons;
1031                 u16 sw_prod = fp->tx_pkt_prod;
1032                 u16 sw_cons = fp->tx_pkt_cons;
1033
1034                 while (sw_cons != sw_prod) {
1035                         bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
1036                         sw_cons++;
1037                 }
1038         }
1039 }
1040
1041 static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
1042 {
1043         struct bnx2x *bp = fp->bp;
1044         int i;
1045
1046         /* ring wasn't allocated */
1047         if (fp->rx_buf_ring == NULL)
1048                 return;
1049
1050         for (i = 0; i < NUM_RX_BD; i++) {
1051                 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
1052                 struct sk_buff *skb = rx_buf->skb;
1053
1054                 if (skb == NULL)
1055                         continue;
1056
1057                 dma_unmap_single(&bp->pdev->dev,
1058                                  dma_unmap_addr(rx_buf, mapping),
1059                                  fp->rx_buf_size, DMA_FROM_DEVICE);
1060
1061                 rx_buf->skb = NULL;
1062                 dev_kfree_skb(skb);
1063         }
1064 }
1065
1066 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1067 {
1068         int j;
1069
1070         for_each_rx_queue(bp, j) {
1071                 struct bnx2x_fastpath *fp = &bp->fp[j];
1072
1073                 bnx2x_free_rx_bds(fp);
1074
1075                 if (!fp->disable_tpa)
1076                         bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
1077                                             ETH_MAX_AGGREGATION_QUEUES_E1 :
1078                                             ETH_MAX_AGGREGATION_QUEUES_E1H);
1079         }
1080 }
1081
1082 void bnx2x_free_skbs(struct bnx2x *bp)
1083 {
1084         bnx2x_free_tx_skbs(bp);
1085         bnx2x_free_rx_skbs(bp);
1086 }
1087
1088 void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
1089 {
1090         /* load old values */
1091         u32 mf_cfg = bp->mf_config[BP_VN(bp)];
1092
1093         if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
1094                 /* leave all but MAX value */
1095                 mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
1096
1097                 /* set new MAX value */
1098                 mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
1099                                 & FUNC_MF_CFG_MAX_BW_MASK;
1100
1101                 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
1102         }
1103 }
1104
1105 static void bnx2x_free_msix_irqs(struct bnx2x *bp)
1106 {
1107         int i, offset = 1;
1108
1109         free_irq(bp->msix_table[0].vector, bp->dev);
1110         DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
1111            bp->msix_table[0].vector);
1112
1113 #ifdef BCM_CNIC
1114         offset++;
1115 #endif
1116         for_each_eth_queue(bp, i) {
1117                 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq  "
1118                    "state %x\n", i, bp->msix_table[i + offset].vector,
1119                    bnx2x_fp(bp, i, state));
1120
1121                 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
1122         }
1123 }
1124
1125 void bnx2x_free_irq(struct bnx2x *bp)
1126 {
1127         if (bp->flags & USING_MSIX_FLAG)
1128                 bnx2x_free_msix_irqs(bp);
1129         else if (bp->flags & USING_MSI_FLAG)
1130                 free_irq(bp->pdev->irq, bp->dev);
1131         else
1132                 free_irq(bp->pdev->irq, bp->dev);
1133 }
1134
1135 int bnx2x_enable_msix(struct bnx2x *bp)
1136 {
1137         int msix_vec = 0, i, rc, req_cnt;
1138
1139         bp->msix_table[msix_vec].entry = msix_vec;
1140         DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n",
1141            bp->msix_table[0].entry);
1142         msix_vec++;
1143
1144 #ifdef BCM_CNIC
1145         bp->msix_table[msix_vec].entry = msix_vec;
1146         DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d (CNIC)\n",
1147            bp->msix_table[msix_vec].entry, bp->msix_table[msix_vec].entry);
1148         msix_vec++;
1149 #endif
1150         for_each_eth_queue(bp, i) {
1151                 bp->msix_table[msix_vec].entry = msix_vec;
1152                 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
1153                    "(fastpath #%u)\n", msix_vec, msix_vec, i);
1154                 msix_vec++;
1155         }
1156
1157         req_cnt = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_CONTEXT_USE + 1;
1158
1159         rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], req_cnt);
1160
1161         /*
1162          * reconfigure number of tx/rx queues according to available
1163          * MSI-X vectors
1164          */
1165         if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
1166                 /* how many fewer vectors will we have? */
1167                 int diff = req_cnt - rc;
1168
1169                 DP(NETIF_MSG_IFUP,
1170                    "Trying to use less MSI-X vectors: %d\n", rc);
1171
1172                 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
1173
1174                 if (rc) {
1175                         DP(NETIF_MSG_IFUP,
1176                            "MSI-X is not attainable  rc %d\n", rc);
1177                         return rc;
1178                 }
1179                 /*
1180                  * decrease number of queues by number of unallocated entries
1181                  */
1182                 bp->num_queues -= diff;
1183
1184                 DP(NETIF_MSG_IFUP, "New queue configuration set: %d\n",
1185                                   bp->num_queues);
1186         } else if (rc) {
1187                 /* fall back to INTx if not enough memory */
1188                 if (rc == -ENOMEM)
1189                         bp->flags |= DISABLE_MSI_FLAG;
1190                 DP(NETIF_MSG_IFUP, "MSI-X is not attainable  rc %d\n", rc);
1191                 return rc;
1192         }
1193
1194         bp->flags |= USING_MSIX_FLAG;
1195
1196         return 0;
1197 }
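/* Note on the retry above: with this pci_enable_msix() API a positive
 * return value means the request could not be granted but that many
 * vectors are available, so the driver retries with rc vectors and
 * shrinks bp->num_queues by the difference.
 */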
1198
1199 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1200 {
1201         int i, rc, offset = 1;
1202
1203         rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
1204                          bp->dev->name, bp->dev);
1205         if (rc) {
1206                 BNX2X_ERR("request sp irq failed\n");
1207                 return -EBUSY;
1208         }
1209
1210 #ifdef BCM_CNIC
1211         offset++;
1212 #endif
1213         for_each_eth_queue(bp, i) {
1214                 struct bnx2x_fastpath *fp = &bp->fp[i];
1215                 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1216                          bp->dev->name, i);
1217
1218                 rc = request_irq(bp->msix_table[offset].vector,
1219                                  bnx2x_msix_fp_int, 0, fp->name, fp);
1220                 if (rc) {
1221                         BNX2X_ERR("request fp #%d irq failed  rc %d\n", i, rc);
1222                         bnx2x_free_msix_irqs(bp);
1223                         return -EBUSY;
1224                 }
1225
1226                 offset++;
1227                 fp->state = BNX2X_FP_STATE_IRQ;
1228         }
1229
1230         i = BNX2X_NUM_ETH_QUEUES(bp);
1231         offset = 1 + CNIC_CONTEXT_USE;
1232         netdev_info(bp->dev, "using MSI-X  IRQs: sp %d  fp[%d] %d"
1233                " ... fp[%d] %d\n",
1234                bp->msix_table[0].vector,
1235                0, bp->msix_table[offset].vector,
1236                i - 1, bp->msix_table[offset + i - 1].vector);
1237
1238         return 0;
1239 }
1240
1241 int bnx2x_enable_msi(struct bnx2x *bp)
1242 {
1243         int rc;
1244
1245         rc = pci_enable_msi(bp->pdev);
1246         if (rc) {
1247                 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
1248                 return -1;
1249         }
1250         bp->flags |= USING_MSI_FLAG;
1251
1252         return 0;
1253 }
1254
1255 static int bnx2x_req_irq(struct bnx2x *bp)
1256 {
1257         unsigned long flags;
1258         int rc;
1259
1260         if (bp->flags & USING_MSI_FLAG)
1261                 flags = 0;
1262         else
1263                 flags = IRQF_SHARED;
1264
1265         rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
1266                          bp->dev->name, bp->dev);
1267         if (!rc)
1268                 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
1269
1270         return rc;
1271 }
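/* IRQF_SHARED is only needed for legacy INTx, where the interrupt line
 * may be shared with other devices; an MSI vector is exclusive to this
 * device, so flags are left at 0 in that case.
 */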
1272
1273 static void bnx2x_napi_enable(struct bnx2x *bp)
1274 {
1275         int i;
1276
1277         for_each_napi_queue(bp, i)
1278                 napi_enable(&bnx2x_fp(bp, i, napi));
1279 }
1280
1281 static void bnx2x_napi_disable(struct bnx2x *bp)
1282 {
1283         int i;
1284
1285         for_each_napi_queue(bp, i)
1286                 napi_disable(&bnx2x_fp(bp, i, napi));
1287 }
1288
1289 void bnx2x_netif_start(struct bnx2x *bp)
1290 {
1291         int intr_sem;
1292
1293         intr_sem = atomic_dec_and_test(&bp->intr_sem);
1294         smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
1295
1296         if (intr_sem) {
1297                 if (netif_running(bp->dev)) {
1298                         bnx2x_napi_enable(bp);
1299                         bnx2x_int_enable(bp);
1300                         if (bp->state == BNX2X_STATE_OPEN)
1301                                 netif_tx_wake_all_queues(bp->dev);
1302                 }
1303         }
1304 }
1305
1306 void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1307 {
1308         bnx2x_int_disable_sync(bp, disable_hw);
1309         bnx2x_napi_disable(bp);
1310         netif_tx_disable(bp->dev);
1311 }
1312
1313 u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
1314 {
1315 #ifdef BCM_CNIC
1316         struct bnx2x *bp = netdev_priv(dev);
1317         if (NO_FCOE(bp))
1318                 return skb_tx_hash(dev, skb);
1319         else {
1320                 struct ethhdr *hdr = (struct ethhdr *)skb->data;
1321                 u16 ether_type = ntohs(hdr->h_proto);
1322
1323                 /* Skip VLAN tag if present */
1324                 if (ether_type == ETH_P_8021Q) {
1325                         struct vlan_ethhdr *vhdr =
1326                                 (struct vlan_ethhdr *)skb->data;
1327
1328                         ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1329                 }
1330
1331                 /* If ethertype is FCoE or FIP - use FCoE ring */
1332                 if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
1333                         return bnx2x_fcoe(bp, index);
1334         }
1335 #endif
1336         /* Select a non-FCoE queue:  if FCoE is enabled, exclude the FCoE L2 ring
1337          */
1338         return __skb_tx_hash(dev, skb,
1339                         dev->real_num_tx_queues - FCOE_CONTEXT_USE);
1340 }
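/* Queue selection in short: when FCoE is enabled, FCoE and FIP frames
 * are steered to the dedicated FCoE L2 ring, and everything else is
 * hashed over the remaining real tx queues (hence the
 * "- FCOE_CONTEXT_USE" above).
 */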
1341
1342 void bnx2x_set_num_queues(struct bnx2x *bp)
1343 {
1344         switch (bp->multi_mode) {
1345         case ETH_RSS_MODE_DISABLED:
1346                 bp->num_queues = 1;
1347                 break;
1348         case ETH_RSS_MODE_REGULAR:
1349                 bp->num_queues = bnx2x_calc_num_queues(bp);
1350                 break;
1351
1352         default:
1353                 bp->num_queues = 1;
1354                 break;
1355         }
1356
1357         /* Add special queues */
1358         bp->num_queues += NONE_ETH_CONTEXT_USE;
1359 }
1360
1361 #ifdef BCM_CNIC
1362 static inline void bnx2x_set_fcoe_eth_macs(struct bnx2x *bp)
1363 {
1364         if (!NO_FCOE(bp)) {
1365                 if (!IS_MF_SD(bp))
1366                         bnx2x_set_fip_eth_mac_addr(bp, 1);
1367                 bnx2x_set_all_enode_macs(bp, 1);
1368                 bp->flags |= FCOE_MACS_SET;
1369         }
1370 }
1371 #endif
1372
1373 static void bnx2x_release_firmware(struct bnx2x *bp)
1374 {
1375         kfree(bp->init_ops_offsets);
1376         kfree(bp->init_ops);
1377         kfree(bp->init_data);
1378         release_firmware(bp->firmware);
1379 }
1380
1381 static inline int bnx2x_set_real_num_queues(struct bnx2x *bp)
1382 {
1383         int rc, num = bp->num_queues;
1384
1385 #ifdef BCM_CNIC
1386         if (NO_FCOE(bp))
1387                 num -= FCOE_CONTEXT_USE;
1388
1389 #endif
1390         netif_set_real_num_tx_queues(bp->dev, num);
1391         rc = netif_set_real_num_rx_queues(bp->dev, num);
1392         return rc;
1393 }
1394
1395 static inline void bnx2x_set_rx_buf_size(struct bnx2x *bp)
1396 {
1397         int i;
1398
1399         for_each_queue(bp, i) {
1400                 struct bnx2x_fastpath *fp = &bp->fp[i];
1401
1402                 /* Always use a mini-jumbo MTU for the FCoE L2 ring */
1403                 if (IS_FCOE_IDX(i))
1404                         /*
1405                          * Although no IP frames are expected to arrive on
1406                          * this ring, we still want to add an
1407                          * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
1408                          * overrun attack.
1409                          */
1410                         fp->rx_buf_size =
1411                                 BNX2X_FCOE_MINI_JUMBO_MTU + ETH_OVREHEAD +
1412                                 BNX2X_RX_ALIGN + IP_HEADER_ALIGNMENT_PADDING;
1413                 else
1414                         fp->rx_buf_size =
1415                                 bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN +
1416                                 IP_HEADER_ALIGNMENT_PADDING;
1417         }
1418 }
1419
1420 /* must be called with rtnl_lock */
1421 int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
1422 {
1423         u32 load_code;
1424         int i, rc;
1425
1426         /* Set init arrays */
1427         rc = bnx2x_init_firmware(bp);
1428         if (rc) {
1429                 BNX2X_ERR("Error loading firmware\n");
1430                 return rc;
1431         }
1432
1433 #ifdef BNX2X_STOP_ON_ERROR
1434         if (unlikely(bp->panic))
1435                 return -EPERM;
1436 #endif
1437
1438         bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
1439
1440         /* Set the initial link reported state to link down */
1441         bnx2x_acquire_phy_lock(bp);
1442         memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
1443         __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1444                 &bp->last_reported_link.link_report_flags);
1445         bnx2x_release_phy_lock(bp);
1446
1447         /* must be called before memory allocation and HW init */
1448         bnx2x_ilt_set_info(bp);
1449
1450         /* Zero the fastpath structures, preserving invariants such as the
1451          * napi objects, which are allocated only once.
1452          */
1453         for_each_queue(bp, i)
1454                 bnx2x_bz_fp(bp, i);
1455
1456         /* Set the receive queues buffer size */
1457         bnx2x_set_rx_buf_size(bp);
1458
1459         for_each_queue(bp, i)
1460                 bnx2x_fp(bp, i, disable_tpa) =
1461                                         ((bp->flags & TPA_ENABLE_FLAG) == 0);
1462
1463 #ifdef BCM_CNIC
1464         /* We don't want TPA on FCoE L2 ring */
1465         bnx2x_fcoe(bp, disable_tpa) = 1;
1466 #endif
1467
1468         if (bnx2x_alloc_mem(bp))
1469                 return -ENOMEM;
1470
1471         /* Since bnx2x_alloc_mem() may update
1472          * bp->num_queues, bnx2x_set_real_num_queues() must always
1473          * come after it.
1474          */
1475         rc = bnx2x_set_real_num_queues(bp);
1476         if (rc) {
1477                 BNX2X_ERR("Unable to set real_num_queues\n");
1478                 goto load_error0;
1479         }
1480
1481         bnx2x_napi_enable(bp);
1482
1483         /* Send the LOAD_REQUEST command to the MCP.
1484            The MCP returns the type of LOAD command:
1485            if this is the first port to be initialized,
1486            common blocks should be initialized as well; otherwise not.
1487         */
1488         if (!BP_NOMCP(bp)) {
1489                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0);
1490                 if (!load_code) {
1491                         BNX2X_ERR("MCP response failure, aborting\n");
1492                         rc = -EBUSY;
1493                         goto load_error1;
1494                 }
1495                 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
1496                         rc = -EBUSY; /* other port in diagnostic mode */
1497                         goto load_error1;
1498                 }
1499
1500         } else {
1501                 int path = BP_PATH(bp);
1502                 int port = BP_PORT(bp);
1503
1504                 DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d]      %d, %d, %d\n",
1505                    path, load_count[path][0], load_count[path][1],
1506                    load_count[path][2]);
1507                 load_count[path][0]++;
1508                 load_count[path][1 + port]++;
1509                 DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d]  %d, %d, %d\n",
1510                    path, load_count[path][0], load_count[path][1],
1511                    load_count[path][2]);
1512                 if (load_count[path][0] == 1)
1513                         load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
1514                 else if (load_count[path][1 + port] == 1)
1515                         load_code = FW_MSG_CODE_DRV_LOAD_PORT;
1516                 else
1517                         load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
1518         }
1519
1520         if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
1521             (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
1522             (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
1523                 bp->port.pmf = 1;
1524         else
1525                 bp->port.pmf = 0;
1526         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
1527
1528         /* Initialize HW */
1529         rc = bnx2x_init_hw(bp, load_code);
1530         if (rc) {
1531                 BNX2X_ERR("HW init failed, aborting\n");
1532                 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
1533                 goto load_error2;
1534         }
1535
1536         /* Connect to IRQs */
1537         rc = bnx2x_setup_irqs(bp);
1538         if (rc) {
1539                 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
1540                 goto load_error2;
1541         }
1542
1543         /* Setup NIC internals and enable interrupts */
1544         bnx2x_nic_init(bp, load_code);
1545
1546         if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
1547             (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
1548             (bp->common.shmem2_base))
1549                 SHMEM2_WR(bp, dcc_support,
1550                           (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
1551                            SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
1552
1553         /* Send LOAD_DONE command to MCP */
1554         if (!BP_NOMCP(bp)) {
1555                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
1556                 if (!load_code) {
1557                         BNX2X_ERR("MCP response failure, aborting\n");
1558                         rc = -EBUSY;
1559                         goto load_error3;
1560                 }
1561         }
1562
1563         bnx2x_dcbx_init(bp);
1564
1565         bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
1566
1567         rc = bnx2x_func_start(bp);
1568         if (rc) {
1569                 BNX2X_ERR("Function start failed!\n");
1570 #ifndef BNX2X_STOP_ON_ERROR
1571                 goto load_error3;
1572 #else
1573                 bp->panic = 1;
1574                 return -EBUSY;
1575 #endif
1576         }
1577
1578         rc = bnx2x_setup_client(bp, &bp->fp[0], 1 /* Leading */);
1579         if (rc) {
1580                 BNX2X_ERR("Setup leading failed!\n");
1581 #ifndef BNX2X_STOP_ON_ERROR
1582                 goto load_error3;
1583 #else
1584                 bp->panic = 1;
1585                 return -EBUSY;
1586 #endif
1587         }
1588
1589         if (!CHIP_IS_E1(bp) &&
1590             (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED)) {
1591                 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
1592                 bp->flags |= MF_FUNC_DIS;
1593         }
1594
1595 #ifdef BCM_CNIC
1596         /* Enable Timer scan */
1597         REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
1598 #endif
1599
1600         for_each_nondefault_queue(bp, i) {
1601                 rc = bnx2x_setup_client(bp, &bp->fp[i], 0);
1602                 if (rc)
1603 #ifdef BCM_CNIC
1604                         goto load_error4;
1605 #else
1606                         goto load_error3;
1607 #endif
1608         }
1609
1610         /* Now that the clients are configured, we are ready to work */
1611         bp->state = BNX2X_STATE_OPEN;
1612
1613 #ifdef BCM_CNIC
1614         bnx2x_set_fcoe_eth_macs(bp);
1615 #endif
1616
1617         bnx2x_set_eth_mac(bp, 1);
1618
1619         /* Clear MC configuration */
1620         if (CHIP_IS_E1(bp))
1621                 bnx2x_invalidate_e1_mc_list(bp);
1622         else
1623                 bnx2x_invalidate_e1h_mc_list(bp);
1624
1625         /* Clear UC lists configuration */
1626         bnx2x_invalidate_uc_list(bp);
1627
1628         if (bp->pending_max) {
1629                 bnx2x_update_max_mf_config(bp, bp->pending_max);
1630                 bp->pending_max = 0;
1631         }
1632
1633         if (bp->port.pmf)
1634                 bnx2x_initial_phy_init(bp, load_mode);
1635
1636         /* Initialize Rx filtering */
1637         bnx2x_set_rx_mode(bp->dev);
1638
1639         /* Start fast path */
1640         switch (load_mode) {
1641         case LOAD_NORMAL:
1642                 /* Tx queues should only be re-enabled */
1643                 netif_tx_wake_all_queues(bp->dev);
1644                 /* Initialize the receive filter. */
1645                 break;
1646
1647         case LOAD_OPEN:
1648                 netif_tx_start_all_queues(bp->dev);
1649                 smp_mb__after_clear_bit();
1650                 break;
1651
1652         case LOAD_DIAG:
1653                 bp->state = BNX2X_STATE_DIAG;
1654                 break;
1655
1656         default:
1657                 break;
1658         }
1659
1660         if (!bp->port.pmf)
1661                 bnx2x__link_status_update(bp);
1662
1663         /* start the timer */
1664         mod_timer(&bp->timer, jiffies + bp->current_interval);
1665
1666 #ifdef BCM_CNIC
1667         bnx2x_setup_cnic_irq_info(bp);
1668         if (bp->state == BNX2X_STATE_OPEN)
1669                 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
1670 #endif
1671         bnx2x_inc_load_cnt(bp);
1672
1673         bnx2x_release_firmware(bp);
1674
1675         return 0;
1676
1677 #ifdef BCM_CNIC
1678 load_error4:
1679         /* Disable Timer scan */
1680         REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0);
1681 #endif
1682 load_error3:
1683         bnx2x_int_disable_sync(bp, 1);
1684
1685         /* Free SKBs, SGEs, TPA pool and driver internals */
1686         bnx2x_free_skbs(bp);
1687         for_each_rx_queue(bp, i)
1688                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
1689
1690         /* Release IRQs */
1691         bnx2x_free_irq(bp);
1692 load_error2:
1693         if (!BP_NOMCP(bp)) {
1694                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
1695                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
1696         }
1697
1698         bp->port.pmf = 0;
1699 load_error1:
1700         bnx2x_napi_disable(bp);
1701 load_error0:
1702         bnx2x_free_mem(bp);
1703
1704         bnx2x_release_firmware(bp);
1705
1706         return rc;
1707 }
1708
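     /**
      * bnx2x_nic_unload - shut the NIC down.
      *
      * @bp:          driver handle
      * @unload_mode: UNLOAD_NORMAL, UNLOAD_CLOSE or UNLOAD_RECOVERY
      *
      * Stops Tx and the timer, cleans up the chip (unless unloading for
      * recovery) and frees the driver resources.
      */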
1709 /* must be called with rtnl_lock */
1710 int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
1711 {
1712         int i;
1713
1714         if (bp->state == BNX2X_STATE_CLOSED) {
1715                 /* Interface has been removed - nothing to recover */
1716                 bp->recovery_state = BNX2X_RECOVERY_DONE;
1717                 bp->is_leader = 0;
1718                 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
1719                 smp_wmb();
1720
1721                 return -EINVAL;
1722         }
1723
1724 #ifdef BCM_CNIC
1725         bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
1726 #endif
1727         bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
1728
1729         /* Set "drop all" */
1730         bp->rx_mode = BNX2X_RX_MODE_NONE;
1731         bnx2x_set_storm_rx_mode(bp);
1732
1733         /* Stop Tx */
1734         bnx2x_tx_disable(bp);
1735
1736         del_timer_sync(&bp->timer);
1737
1738         SHMEM_WR(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb,
1739                  (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
1740
1741         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
1742
1743         /* Cleanup the chip if needed */
1744         if (unload_mode != UNLOAD_RECOVERY)
1745                 bnx2x_chip_cleanup(bp, unload_mode);
1746         else {
1747                 /* Disable HW interrupts, NAPI and Tx */
1748                 bnx2x_netif_stop(bp, 1);
1749
1750                 /* Release IRQs */
1751                 bnx2x_free_irq(bp);
1752         }
1753
1754         bp->port.pmf = 0;
1755
1756         /* Free SKBs, SGEs, TPA pool and driver internals */
1757         bnx2x_free_skbs(bp);
1758         for_each_rx_queue(bp, i)
1759                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
1760
1761         bnx2x_free_mem(bp);
1762
1763         bp->state = BNX2X_STATE_CLOSED;
1764
1765         /* The last driver must disable the "close the gate" functionality
1766          * if there is no parity attention or "process kill" pending.
1767          */
1768         if ((!bnx2x_dec_load_cnt(bp)) && (!bnx2x_chk_parity_attn(bp)) &&
1769             bnx2x_reset_is_done(bp))
1770                 bnx2x_disable_close_the_gate(bp);
1771
1772         /* Reset the MCP mailbox sequence if recovery is ongoing */
1773         if (unload_mode == UNLOAD_RECOVERY)
1774                 bp->fw_seq = 0;
1775
1776         return 0;
1777 }
1778
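     /**
      * bnx2x_set_power_state - move the device to D0 or D3hot.
      *
      * @bp:         driver handle
      * @state:      PCI_D0 or PCI_D3hot
      *
      * Silently succeeds when the device has no PM capability.
      */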
1779 int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
1780 {
1781         u16 pmcsr;
1782
1783         /* If there is no power capability, silently succeed */
1784         if (!bp->pm_cap) {
1785                 DP(NETIF_MSG_HW, "No power capability. Breaking.\n");
1786                 return 0;
1787         }
1788
1789         pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
1790
1791         switch (state) {
1792         case PCI_D0:
1793                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
1794                                       ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
1795                                        PCI_PM_CTRL_PME_STATUS));
1796
1797                 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
1798                         /* delay required during transition out of D3hot */
1799                         msleep(20);
1800                 break;
1801
1802         case PCI_D3hot:
1803                 /* If there are other clients above, don't
1804                    shut down the power */
1805                 if (atomic_read(&bp->pdev->enable_cnt) != 1)
1806                         return 0;
1807                 /* Don't shut down the power for emulation and FPGA */
1808                 if (CHIP_REV_IS_SLOW(bp))
1809                         return 0;
1810
1811                 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
1812                 pmcsr |= 3;
1813
1814                 if (bp->wol)
1815                         pmcsr |= PCI_PM_CTRL_PME_ENABLE;
1816
1817                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
1818                                       pmcsr);
1819
1820                 /* No more memory access after this point until
1821                  * device is brought back to D0.
1822                  */
1823                 break;
1824
1825         default:
1826                 return -EINVAL;
1827         }
1828         return 0;
1829 }
1830
1831 /*
1832  * net_device service functions
1833  */
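     /**
      * bnx2x_poll - NAPI callback.
      *
      * @napi:       napi structure of the fastpath being polled
      * @budget:     maximum number of Rx packets to process
      *
      * Services Tx completions and Rx work; once there is no more work
      * and the budget is not exhausted, completes NAPI and re-enables
      * the IGU interrupt.
      */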
1834 int bnx2x_poll(struct napi_struct *napi, int budget)
1835 {
1836         int work_done = 0;
1837         struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
1838                                                  napi);
1839         struct bnx2x *bp = fp->bp;
1840
1841         while (1) {
1842 #ifdef BNX2X_STOP_ON_ERROR
1843                 if (unlikely(bp->panic)) {
1844                         napi_complete(napi);
1845                         return 0;
1846                 }
1847 #endif
1848
1849                 if (bnx2x_has_tx_work(fp))
1850                         bnx2x_tx_int(fp);
1851
1852                 if (bnx2x_has_rx_work(fp)) {
1853                         work_done += bnx2x_rx_int(fp, budget - work_done);
1854
1855                         /* must not complete if we consumed full budget */
1856                         if (work_done >= budget)
1857                                 break;
1858                 }
1859
1860                 /* Fall out from the NAPI loop if needed */
1861                 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
1862 #ifdef BCM_CNIC
1863                         /* No need to update SB for FCoE L2 ring as long as
1864                          * it's connected to the default SB and the SB
1865                          * has been updated when NAPI was scheduled.
1866                          */
1867                         if (IS_FCOE_FP(fp)) {
1868                                 napi_complete(napi);
1869                                 break;
1870                         }
1871 #endif
1872
1873                         bnx2x_update_fpsb_idx(fp);
1874                         /* bnx2x_has_rx_work() reads the status block, so we
1875                          * must ensure that the status block indices have
1876                          * actually been read (bnx2x_update_fpsb_idx) before
1877                          * this check (bnx2x_has_rx_work); otherwise we could
1878                          * write the "newer" value of the status block to the
1879                          * IGU: if a DMA happened right after
1880                          * bnx2x_has_rx_work and there were no rmb, the memory
1881                          * read (bnx2x_update_fpsb_idx) might be postponed
1882                          * until right before bnx2x_ack_sb. In that case there
1883                          * would never be another interrupt until there is
1884                          * another update of the status block, even though
1885                          * there is still unhandled work.
1886                          */
1887                         rmb();
1888
1889                         if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
1890                                 napi_complete(napi);
1891                                 /* Re-enable interrupts */
1892                                 DP(NETIF_MSG_HW,
1893                                    "Update index to %d\n", fp->fp_hc_idx);
1894                                 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
1895                                              le16_to_cpu(fp->fp_hc_idx),
1896                                              IGU_INT_ENABLE, 1);
1897                                 break;
1898                         }
1899                 }
1900         }
1901
1902         return work_done;
1903 }
1904
1905 /* We split the first BD into a header BD and a data BD
1906  * to ease the pain of our fellow microcode engineers;
1907  * we use one mapping for both BDs.
1908  * So far this has only been observed to happen
1909  * in Other Operating Systems(TM).
1910  */
1911 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
1912                                    struct bnx2x_fastpath *fp,
1913                                    struct sw_tx_bd *tx_buf,
1914                                    struct eth_tx_start_bd **tx_bd, u16 hlen,
1915                                    u16 bd_prod, int nbd)
1916 {
1917         struct eth_tx_start_bd *h_tx_bd = *tx_bd;
1918         struct eth_tx_bd *d_tx_bd;
1919         dma_addr_t mapping;
1920         int old_len = le16_to_cpu(h_tx_bd->nbytes);
1921
1922         /* first fix first BD */
1923         h_tx_bd->nbd = cpu_to_le16(nbd);
1924         h_tx_bd->nbytes = cpu_to_le16(hlen);
1925
1926         DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
1927            "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
1928            h_tx_bd->addr_lo, h_tx_bd->nbd);
1929
1930         /* now get a new data BD
1931          * (after the pbd) and fill it */
1932         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
1933         d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
1934
1935         mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
1936                            le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
1937
1938         d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1939         d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1940         d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
1941
1942         /* this marks the BD as one that has no individual mapping */
1943         tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
1944
1945         DP(NETIF_MSG_TX_QUEUED,
1946            "TSO split data size is %d (%x:%x)\n",
1947            d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
1948
1949         /* update tx_bd */
1950         *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
1951
1952         return bd_prod;
1953 }
1954
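     /* Fix up a partial checksum: subtract the checksum of the |fix| bytes
      * preceding t_header when fix > 0, or add the checksum of the |fix|
      * bytes at t_header when fix < 0, then fold the result and return it
      * byte-swapped for the parsing BD (see the "HW bug" note in
      * bnx2x_set_pbd_csum()).
      */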
1955 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
1956 {
1957         if (fix > 0)
1958                 csum = (u16) ~csum_fold(csum_sub(csum,
1959                                 csum_partial(t_header - fix, fix, 0)));
1960
1961         else if (fix < 0)
1962                 csum = (u16) ~csum_fold(csum_add(csum,
1963                                 csum_partial(t_header, -fix, 0)));
1964
1965         return swab16(csum);
1966 }
1967
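     /* Classify the skb into XMIT_* flags (IPv4/IPv6 checksum, TCP checksum,
      * GSO) that drive how the BDs and the parsing BD are set up.
      */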
1968 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
1969 {
1970         u32 rc;
1971
1972         if (skb->ip_summed != CHECKSUM_PARTIAL)
1973                 rc = XMIT_PLAIN;
1974
1975         else {
1976                 if (vlan_get_protocol(skb) == htons(ETH_P_IPV6)) {
1977                         rc = XMIT_CSUM_V6;
1978                         if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
1979                                 rc |= XMIT_CSUM_TCP;
1980
1981                 } else {
1982                         rc = XMIT_CSUM_V4;
1983                         if (ip_hdr(skb)->protocol == IPPROTO_TCP)
1984                                 rc |= XMIT_CSUM_TCP;
1985                 }
1986         }
1987
1988         if (skb_is_gso_v6(skb))
1989                 rc |= XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6;
1990         else if (skb_is_gso(skb))
1991                 rc |= XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP;
1992
1993         return rc;
1994 }
1995
1996 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
1997 /* Check if the packet requires linearization (i.e. it is too fragmented).
1998    There is no need to check fragmentation if the page size > 8K (there
1999    will be no violation of the FW restrictions) */
2000 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
2001                              u32 xmit_type)
2002 {
2003         int to_copy = 0;
2004         int hlen = 0;
2005         int first_bd_sz = 0;
2006
2007         /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
2008         if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
2009
2010                 if (xmit_type & XMIT_GSO) {
2011                         unsigned short lso_mss = skb_shinfo(skb)->gso_size;
2012                         /* Check if LSO packet needs to be copied:
2013                            3 = 1 (for headers BD) + 2 (for PBD and last BD) */
2014                         int wnd_size = MAX_FETCH_BD - 3;
2015                         /* Number of windows to check */
2016                         int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
2017                         int wnd_idx = 0;
2018                         int frag_idx = 0;
2019                         u32 wnd_sum = 0;
2020
2021                         /* Headers length */
2022                         hlen = (int)(skb_transport_header(skb) - skb->data) +
2023                                 tcp_hdrlen(skb);
2024
2025                         /* Amount of data (w/o headers) on linear part of SKB */
2026                         first_bd_sz = skb_headlen(skb) - hlen;
2027
2028                         wnd_sum  = first_bd_sz;
2029
2030                         /* Calculate the first sum - it's special */
2031                         for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
2032                                 wnd_sum +=
2033                                         skb_shinfo(skb)->frags[frag_idx].size;
2034
2035                         /* If there was data on linear skb data - check it */
2036                         if (first_bd_sz > 0) {
2037                                 if (unlikely(wnd_sum < lso_mss)) {
2038                                         to_copy = 1;
2039                                         goto exit_lbl;
2040                                 }
2041
2042                                 wnd_sum -= first_bd_sz;
2043                         }
2044
2045                         /* Others are easier: run through the frag list and
2046                            check all windows */
2047                         for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
2048                                 wnd_sum +=
2049                           skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
2050
2051                                 if (unlikely(wnd_sum < lso_mss)) {
2052                                         to_copy = 1;
2053                                         break;
2054                                 }
2055                                 wnd_sum -=
2056                                         skb_shinfo(skb)->frags[wnd_idx].size;
2057                         }
2058                 } else {
2059                         /* in the non-LSO case a too-fragmented packet
2060                            should always be linearized */
2061                         to_copy = 1;
2062                 }
2063         }
2064
2065 exit_lbl:
2066         if (unlikely(to_copy))
2067                 DP(NETIF_MSG_TX_QUEUED,
2068                    "Linearization IS REQUIRED for %s packet. "
2069                    "num_frags %d  hlen %d  first_bd_sz %d\n",
2070                    (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
2071                    skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
2072
2073         return to_copy;
2074 }
2075 #endif
2076
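     /* Update the E2 parsing data with the LSO MSS and, when the IPv6 next
      * header is NEXTHDR_IPV6, set the extension-header flag.
      */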
2077 static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
2078                                         u32 xmit_type)
2079 {
2080         *parsing_data |= (skb_shinfo(skb)->gso_size <<
2081                               ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
2082                               ETH_TX_PARSE_BD_E2_LSO_MSS;
2083         if ((xmit_type & XMIT_GSO_V6) &&
2084             (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
2085                 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
2086 }
2087
2088 /**
2089  * bnx2x_set_pbd_gso - update PBD in GSO case.
2090  *
2091  * @skb:        packet skb
2092  * @pbd:        parse BD
2093  * @xmit_type:  xmit flags
2094  */
2095 static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
2096                                      struct eth_tx_parse_bd_e1x *pbd,
2097                                      u32 xmit_type)
2098 {
2099         pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2100         pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
2101         pbd->tcp_flags = pbd_tcp_flags(skb);
2102
2103         if (xmit_type & XMIT_GSO_V4) {
2104                 pbd->ip_id = swab16(ip_hdr(skb)->id);
2105                 pbd->tcp_pseudo_csum =
2106                         swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
2107                                                   ip_hdr(skb)->daddr,
2108                                                   0, IPPROTO_TCP, 0));
2109
2110         } else
2111                 pbd->tcp_pseudo_csum =
2112                         swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2113                                                 &ipv6_hdr(skb)->daddr,
2114                                                 0, IPPROTO_TCP, 0));
2115
2116         pbd->global_data |= ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN;
2117 }
2118
2119 /**
2120  * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
2121  *
2122  * @bp:                 driver handle
2123  * @skb:                packet skb
2124  * @parsing_data:       data to be updated
2125  * @xmit_type:          xmit flags
2126  *
2127  * 57712 related
2128  */
2129 static inline  u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
2130         u32 *parsing_data, u32 xmit_type)
2131 {
2132         *parsing_data |=
2133                         ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
2134                         ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) &
2135                         ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W;
2136
2137         if (xmit_type & XMIT_CSUM_TCP) {
2138                 *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
2139                         ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
2140                         ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
2141
2142                 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
2143         } else
2144                 /* We support checksum offload for TCP and UDP only.
2145                  * No need to pass the UDP header length - it's a constant.
2146                  */
2147                 return skb_transport_header(skb) +
2148                                 sizeof(struct udphdr) - skb->data;
2149 }
2150
2151 /**
2152  * bnx2x_set_pbd_csum - update PBD with checksum and return header length
2153  *
2154  * @bp:         driver handle
2155  * @skb:        packet skb
2156  * @pbd:        parse BD to be updated
2157  * @xmit_type:  xmit flags
2158  */
2159 static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
2160         struct eth_tx_parse_bd_e1x *pbd,
2161         u32 xmit_type)
2162 {
2163         u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
2164
2165         /* for now NS flag is not used in Linux */
2166         pbd->global_data =
2167                 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
2168                          ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
2169
2170         pbd->ip_hlen_w = (skb_transport_header(skb) -
2171                         skb_network_header(skb)) >> 1;
2172
2173         hlen += pbd->ip_hlen_w;
2174
2175         /* We support checksum offload for TCP and UDP only */
2176         if (xmit_type & XMIT_CSUM_TCP)
2177                 hlen += tcp_hdrlen(skb) / 2;
2178         else
2179                 hlen += sizeof(struct udphdr) / 2;
2180
2181         pbd->total_hlen_w = cpu_to_le16(hlen);
2182         hlen = hlen*2;
2183
2184         if (xmit_type & XMIT_CSUM_TCP) {
2185                 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
2186
2187         } else {
2188                 s8 fix = SKB_CS_OFF(skb); /* signed! */
2189
2190                 DP(NETIF_MSG_TX_QUEUED,
2191                    "hlen %d  fix %d  csum before fix %x\n",
2192                    le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
2193
2194                 /* HW bug: fixup the CSUM */
2195                 pbd->tcp_pseudo_csum =
2196                         bnx2x_csum_fix(skb_transport_header(skb),
2197                                        SKB_CS(skb), fix);
2198
2199                 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
2200                    pbd->tcp_pseudo_csum);
2201         }
2202
2203         return hlen;
2204 }
2205
2206 /* called with netif_tx_lock
2207  * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
2208  * netif_wake_queue()
2209  */
2210 netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
2211 {
2212         struct bnx2x *bp = netdev_priv(dev);
2213         struct bnx2x_fastpath *fp;
2214         struct netdev_queue *txq;
2215         struct sw_tx_bd *tx_buf;
2216         struct eth_tx_start_bd *tx_start_bd;
2217         struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
2218         struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
2219         struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
2220         u32 pbd_e2_parsing_data = 0;
2221         u16 pkt_prod, bd_prod;
2222         int nbd, fp_index;
2223         dma_addr_t mapping;
2224         u32 xmit_type = bnx2x_xmit_type(bp, skb);
2225         int i;
2226         u8 hlen = 0;
2227         __le16 pkt_size = 0;
2228         struct ethhdr *eth;
2229         u8 mac_type = UNICAST_ADDRESS;
2230
2231 #ifdef BNX2X_STOP_ON_ERROR
2232         if (unlikely(bp->panic))
2233                 return NETDEV_TX_BUSY;
2234 #endif
2235
2236         fp_index = skb_get_queue_mapping(skb);
2237         txq = netdev_get_tx_queue(dev, fp_index);
2238
2239         fp = &bp->fp[fp_index];
2240
2241         if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
2242                 fp->eth_q_stats.driver_xoff++;
2243                 netif_tx_stop_queue(txq);
2244                 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
2245                 return NETDEV_TX_BUSY;
2246         }
2247
2248         DP(NETIF_MSG_TX_QUEUED, "queue[%d]: SKB: summed %x  protocol %x  "
2249                                 "protocol(%x,%x) gso type %x  xmit_type %x\n",
2250            fp_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
2251            ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
2252
2253         eth = (struct ethhdr *)skb->data;
2254
2255         /* set flag according to packet type (UNICAST_ADDRESS is default)*/
2256         if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
2257                 if (is_broadcast_ether_addr(eth->h_dest))
2258                         mac_type = BROADCAST_ADDRESS;
2259                 else
2260                         mac_type = MULTICAST_ADDRESS;
2261         }
2262
2263 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
2264         /* First, check if we need to linearize the skb (due to FW
2265            restrictions). No need to check fragmentation if page size > 8K
2266            (there will be no violation of the FW restrictions) */
2267         if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
2268                 /* Statistics of linearization */
2269                 bp->lin_cnt++;
2270                 if (skb_linearize(skb) != 0) {
2271                         DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
2272                            "silently dropping this SKB\n");
2273                         dev_kfree_skb_any(skb);
2274                         return NETDEV_TX_OK;
2275                 }
2276         }
2277 #endif
2278
2279         /*
2280         Please read carefully. First we use one BD which we mark as start,
2281         then we have a parsing info BD (used for TSO or xsum),
2282         and only then we have the rest of the TSO BDs.
2283         (don't forget to mark the last one as last,
2284         and to unmap only AFTER you write to the BD ...)
2285         And above all, all PBD sizes are in words - NOT DWORDS!
2286         */
2287
2288         pkt_prod = fp->tx_pkt_prod++;
2289         bd_prod = TX_BD(fp->tx_bd_prod);
2290
2291         /* get a tx_buf and first BD */
2292         tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
2293         tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
2294
2295         tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
2296         SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_ETH_ADDR_TYPE,
2297                  mac_type);
2298
2299         /* header nbd */
2300         SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1);
2301
2302         /* remember the first BD of the packet */
2303         tx_buf->first_bd = fp->tx_bd_prod;
2304         tx_buf->skb = skb;
2305         tx_buf->flags = 0;
2306
2307         DP(NETIF_MSG_TX_QUEUED,
2308            "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
2309            pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
2310
2311         if (vlan_tx_tag_present(skb)) {
2312                 tx_start_bd->vlan_or_ethertype =
2313                     cpu_to_le16(vlan_tx_tag_get(skb));
2314                 tx_start_bd->bd_flags.as_bitfield |=
2315                     (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
2316         } else
2317                 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
2318
2319         /* turn on parsing and get a BD */
2320         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2321
2322         if (xmit_type & XMIT_CSUM) {
2323                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
2324
2325                 if (xmit_type & XMIT_CSUM_V4)
2326                         tx_start_bd->bd_flags.as_bitfield |=
2327                                                 ETH_TX_BD_FLAGS_IP_CSUM;
2328                 else
2329                         tx_start_bd->bd_flags.as_bitfield |=
2330                                                 ETH_TX_BD_FLAGS_IPV6;
2331
2332                 if (!(xmit_type & XMIT_CSUM_TCP))
2333                         tx_start_bd->bd_flags.as_bitfield |=
2334                                                 ETH_TX_BD_FLAGS_IS_UDP;
2335         }
2336
2337         if (CHIP_IS_E2(bp)) {
2338                 pbd_e2 = &fp->tx_desc_ring[bd_prod].parse_bd_e2;
2339                 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
2340                 /* Set PBD in checksum offload case */
2341                 if (xmit_type & XMIT_CSUM)
2342                         hlen = bnx2x_set_pbd_csum_e2(bp, skb,
2343                                                      &pbd_e2_parsing_data,
2344                                                      xmit_type);
2345         } else {
2346                 pbd_e1x = &fp->tx_desc_ring[bd_prod].parse_bd_e1x;
2347                 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
2348                 /* Set PBD in checksum offload case */
2349                 if (xmit_type & XMIT_CSUM)
2350                         hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
2351
2352         }
2353
2354         /* Map skb linear data for DMA */
2355         mapping = dma_map_single(&bp->pdev->dev, skb->data,
2356                                  skb_headlen(skb), DMA_TO_DEVICE);
2357
2358         /* Setup the data pointer of the first BD of the packet */
2359         tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2360         tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
2361         nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
2362         tx_start_bd->nbd = cpu_to_le16(nbd);
2363         tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
2364         pkt_size = tx_start_bd->nbytes;
2365
2366         DP(NETIF_MSG_TX_QUEUED, "first bd @%p  addr (%x:%x)  nbd %d"
2367            "  nbytes %d  flags %x  vlan %x\n",
2368            tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
2369            le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
2370            tx_start_bd->bd_flags.as_bitfield,
2371            le16_to_cpu(tx_start_bd->vlan_or_ethertype));
2372
2373         if (xmit_type & XMIT_GSO) {
2374
2375                 DP(NETIF_MSG_TX_QUEUED,
2376                    "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
2377                    skb->len, hlen, skb_headlen(skb),
2378                    skb_shinfo(skb)->gso_size);
2379
2380                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
2381
2382                 if (unlikely(skb_headlen(skb) > hlen))
2383                         bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
2384                                                  hlen, bd_prod, ++nbd);
2385                 if (CHIP_IS_E2(bp))
2386                         bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
2387                                              xmit_type);
2388                 else
2389                         bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
2390         }
2391
2392         /* Set the PBD's parsing_data field if not zero
2393          * (for the chips newer than 57711).
2394          */
2395         if (pbd_e2_parsing_data)
2396                 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
2397
2398         tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
2399
2400         /* Handle fragmented skb */
2401         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2402                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2403
2404                 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2405                 tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
2406                 if (total_pkt_bd == NULL)
2407                         total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
2408
2409                 mapping = dma_map_page(&bp->pdev->dev, frag->page,
2410                                        frag->page_offset,
2411                                        frag->size, DMA_TO_DEVICE);
2412
2413                 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2414                 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
2415                 tx_data_bd->nbytes = cpu_to_le16(frag->size);
2416                 le16_add_cpu(&pkt_size, frag->size);
2417
2418                 DP(NETIF_MSG_TX_QUEUED,
2419                    "frag %d  bd @%p  addr (%x:%x)  nbytes %d\n",
2420                    i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
2421                    le16_to_cpu(tx_data_bd->nbytes));
2422         }
2423
2424         DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
2425
2426         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2427
2428         /* now send a tx doorbell, counting the next BD
2429          * if the packet contains or ends with it
2430          */
2431         if (TX_BD_POFF(bd_prod) < nbd)
2432                 nbd++;
2433
2434         if (total_pkt_bd != NULL)
2435                 total_pkt_bd->total_pkt_bytes = pkt_size;
2436
2437         if (pbd_e1x)
2438                 DP(NETIF_MSG_TX_QUEUED,
2439                    "PBD (E1X) @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u"
2440                    "  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
2441                    pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
2442                    pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
2443                    pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
2444                     le16_to_cpu(pbd_e1x->total_hlen_w));
2445         if (pbd_e2)
2446                 DP(NETIF_MSG_TX_QUEUED,
2447                    "PBD (E2) @%p  dst %x %x %x src %x %x %x parsing_data %x\n",
2448                    pbd_e2, pbd_e2->dst_mac_addr_hi, pbd_e2->dst_mac_addr_mid,
2449                    pbd_e2->dst_mac_addr_lo, pbd_e2->src_mac_addr_hi,
2450                    pbd_e2->src_mac_addr_mid, pbd_e2->src_mac_addr_lo,
2451                    pbd_e2->parsing_data);
2452         DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);
2453
2454         /*
2455          * Make sure that the BD data is updated before updating the producer
2456          * since FW might read the BD right after the producer is updated.
2457          * This is only applicable for weak-ordered memory model archs such
2458          * as IA-64. The following barrier is also mandatory since the FW
2459          * assumes packets must have BDs.
2460          */
2461         wmb();
2462
2463         fp->tx_db.data.prod += nbd;
2464         barrier();
2465
2466         DOORBELL(bp, fp->cid, fp->tx_db.raw);
2467
2468         mmiowb();
2469
2470         fp->tx_bd_prod += nbd;
2471
2472         if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
2473                 netif_tx_stop_queue(txq);
2474
2475                 /* The paired memory barrier is in bnx2x_tx_int(); we have to
2476                  * keep the ordering of the set_bit() in netif_tx_stop_queue()
2477                  * and the read of fp->tx_bd_cons */
2478                 smp_mb();
2479
2480                 fp->eth_q_stats.driver_xoff++;
2481                 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
2482                         netif_tx_wake_queue(txq);
2483         }
2484         fp->tx_pkt++;
2485
2486         return NETDEV_TX_OK;
2487 }
2488
2489 /* called with rtnl_lock */
2490 int bnx2x_change_mac_addr(struct net_device *dev, void *p)
2491 {
2492         struct sockaddr *addr = p;
2493         struct bnx2x *bp = netdev_priv(dev);
2494
2495         if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
2496                 return -EINVAL;
2497
2498         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2499         if (netif_running(dev))
2500                 bnx2x_set_eth_mac(bp, 1);
2501
2502         return 0;
2503 }
2504
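     /* Free the status block and the Rx/Tx rings of a single fastpath (for
      * the FCoE index the status block is only zeroed).
      */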
2505 static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
2506 {
2507         union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
2508         struct bnx2x_fastpath *fp = &bp->fp[fp_index];
2509
2510         /* Common */
2511 #ifdef BCM_CNIC
2512         if (IS_FCOE_IDX(fp_index)) {
2513                 memset(sb, 0, sizeof(union host_hc_status_block));
2514                 fp->status_blk_mapping = 0;
2515
2516         } else {
2517 #endif
2518                 /* status blocks */
2519                 if (CHIP_IS_E2(bp))
2520                         BNX2X_PCI_FREE(sb->e2_sb,
2521                                        bnx2x_fp(bp, fp_index,
2522                                                 status_blk_mapping),
2523                                        sizeof(struct host_hc_status_block_e2));
2524                 else
2525                         BNX2X_PCI_FREE(sb->e1x_sb,
2526                                        bnx2x_fp(bp, fp_index,
2527                                                 status_blk_mapping),
2528                                        sizeof(struct host_hc_status_block_e1x));
2529 #ifdef BCM_CNIC
2530         }
2531 #endif
2532         /* Rx */
2533         if (!skip_rx_queue(bp, fp_index)) {
2534                 bnx2x_free_rx_bds(fp);
2535
2536                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
2537                 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
2538                 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
2539                                bnx2x_fp(bp, fp_index, rx_desc_mapping),
2540                                sizeof(struct eth_rx_bd) * NUM_RX_BD);
2541
2542                 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
2543                                bnx2x_fp(bp, fp_index, rx_comp_mapping),
2544                                sizeof(struct eth_fast_path_rx_cqe) *
2545                                NUM_RCQ_BD);
2546
2547                 /* SGE ring */
2548                 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
2549                 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
2550                                bnx2x_fp(bp, fp_index, rx_sge_mapping),
2551                                BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
2552         }
2553
2554         /* Tx */
2555         if (!skip_tx_queue(bp, fp_index)) {
2556                 /* fastpath tx rings: tx_buf tx_desc */
2557                 BNX2X_FREE(bnx2x_fp(bp, fp_index, tx_buf_ring));
2558                 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, tx_desc_ring),
2559                                bnx2x_fp(bp, fp_index, tx_desc_mapping),
2560                                sizeof(union eth_tx_bd_types) * NUM_TX_BD);
2561         }
2562         /* end of fastpath */
2563 }
2564
2565 void bnx2x_free_fp_mem(struct bnx2x *bp)
2566 {
2567         int i;
2568         for_each_queue(bp, i)
2569                 bnx2x_free_fp_mem_at(bp, i);
2570 }
2571
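     /* Cache pointers to the status block index arrays (E2 vs E1x layout)
      * in the fastpath structure.
      */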
2572 static inline void set_sb_shortcuts(struct bnx2x *bp, int index)
2573 {
2574         union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
2575         if (CHIP_IS_E2(bp)) {
2576                 bnx2x_fp(bp, index, sb_index_values) =
2577                         (__le16 *)status_blk.e2_sb->sb.index_values;
2578                 bnx2x_fp(bp, index, sb_running_index) =
2579                         (__le16 *)status_blk.e2_sb->sb.running_index;
2580         } else {
2581                 bnx2x_fp(bp, index, sb_index_values) =
2582                         (__le16 *)status_blk.e1x_sb->sb.index_values;
2583                 bnx2x_fp(bp, index, sb_running_index) =
2584                         (__le16 *)status_blk.e1x_sb->sb.running_index;
2585         }
2586 }
2587
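     /* Allocate the status block and the Rx/Tx rings of a single fastpath.
      * A smaller-than-requested Rx ring is accepted as long as it still
      * meets the FW minimum size.
      */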
2588 static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
2589 {
2590         union host_hc_status_block *sb;
2591         struct bnx2x_fastpath *fp = &bp->fp[index];
2592         int ring_size = 0;
2593
2594         /* if rx_ring_size specified - use it */
2595         int rx_ring_size = bp->rx_ring_size ? bp->rx_ring_size :
2596                            MAX_RX_AVAIL/bp->num_queues;
2597
2598         /* allocate at least number of buffers required by FW */
2599         rx_ring_size = max_t(int, fp->disable_tpa ? MIN_RX_SIZE_NONTPA :
2600                                                     MIN_RX_SIZE_TPA,
2601                                   rx_ring_size);
2602
2603         bnx2x_fp(bp, index, bp) = bp;
2604         bnx2x_fp(bp, index, index) = index;
2605
2606         /* Common */
2607         sb = &bnx2x_fp(bp, index, status_blk);
2608 #ifdef BCM_CNIC
2609         if (!IS_FCOE_IDX(index)) {
2610 #endif
2611                 /* status blocks */
2612                 if (CHIP_IS_E2(bp))
2613                         BNX2X_PCI_ALLOC(sb->e2_sb,
2614                                 &bnx2x_fp(bp, index, status_blk_mapping),
2615                                 sizeof(struct host_hc_status_block_e2));
2616                 else
2617                         BNX2X_PCI_ALLOC(sb->e1x_sb,
2618                                 &bnx2x_fp(bp, index, status_blk_mapping),
2619                             sizeof(struct host_hc_status_block_e1x));
2620 #ifdef BCM_CNIC
2621         }
2622 #endif
2623         set_sb_shortcuts(bp, index);
2624
2625         /* Tx */
2626         if (!skip_tx_queue(bp, index)) {
2627                 /* fastpath tx rings: tx_buf tx_desc */
2628                 BNX2X_ALLOC(bnx2x_fp(bp, index, tx_buf_ring),
2629                                 sizeof(struct sw_tx_bd) * NUM_TX_BD);
2630                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, tx_desc_ring),
2631                                 &bnx2x_fp(bp, index, tx_desc_mapping),
2632                                 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
2633         }
2634
2635         /* Rx */
2636         if (!skip_rx_queue(bp, index)) {
2637                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
2638                 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_buf_ring),
2639                                 sizeof(struct sw_rx_bd) * NUM_RX_BD);
2640                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_desc_ring),
2641                                 &bnx2x_fp(bp, index, rx_desc_mapping),
2642                                 sizeof(struct eth_rx_bd) * NUM_RX_BD);
2643
2644                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_comp_ring),
2645                                 &bnx2x_fp(bp, index, rx_comp_mapping),
2646                                 sizeof(struct eth_fast_path_rx_cqe) *
2647                                 NUM_RCQ_BD);
2648
2649                 /* SGE ring */
2650                 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring),
2651                                 sizeof(struct sw_rx_page) * NUM_RX_SGE);
2652                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_sge_ring),
2653                                 &bnx2x_fp(bp, index, rx_sge_mapping),
2654                                 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
2655                 /* RX BD ring */
2656                 bnx2x_set_next_page_rx_bd(fp);
2657
2658                 /* CQ ring */
2659                 bnx2x_set_next_page_rx_cq(fp);
2660
2661                 /* BDs */
2662                 ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
2663                 if (ring_size < rx_ring_size)
2664                         goto alloc_mem_err;
2665         }
2666
2667         return 0;
2668
2669 /* handles low memory cases */
2670 alloc_mem_err:
2671         BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
2672                                                 index, ring_size);
2673         /* FW will drop all packets if the queue is not big enough.
2674          * In these cases we disable the queue.
2675          * The min size is different for TPA and non-TPA queues.
2676          */
2677         if (ring_size < (fp->disable_tpa ?
2678                                 MIN_RX_SIZE_TPA : MIN_RX_SIZE_NONTPA)) {
2679                         /* release memory allocated for this queue */
2680                         bnx2x_free_fp_mem_at(bp, index);
2681                         return -ENOMEM;
2682         }
2683         return 0;
2684 }
2685
2686 int bnx2x_alloc_fp_mem(struct bnx2x *bp)
2687 {
2688         int i;
2689
2690         /*
2691          * 1. Allocate FP for leading - fatal if error
2692          * 2. {CNIC} Allocate FCoE FP - fatal if error
2693          * 3. Allocate RSS - fix number of queues if error
2694          */
2695
2696         /* leading */
2697         if (bnx2x_alloc_fp_mem_at(bp, 0))
2698                 return -ENOMEM;
2699 #ifdef BCM_CNIC
2700         /* FCoE */
2701         if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX))
2702                 return -ENOMEM;
2703 #endif
2704         /* RSS */
2705         for_each_nondefault_eth_queue(bp, i)
2706                 if (bnx2x_alloc_fp_mem_at(bp, i))
2707                         break;
2708
2709         /* handle memory failures */
2710         if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
2711                 int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
2712
2713                 WARN_ON(delta < 0);
2714 #ifdef BCM_CNIC
2715                 /*
2716                  * Move non-ETH FPs next to the last ETH FP; this must be
2717                  * done in that order:
2718                  * FCOE_IDX < FWD_IDX < OOO_IDX
2719                  */
2720
2721                 /* move FCoE fp */
2722                 bnx2x_move_fp(bp, FCOE_IDX, FCOE_IDX - delta);
2723 #endif
2724                 bp->num_queues -= delta;
2725                 BNX2X_ERR("Adjusted num of queues from %d to %d\n",
2726                           bp->num_queues + delta, bp->num_queues);
2727         }
2728
2729         return 0;
2730 }
2731
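     /* Request the MSI-X vectors when MSI-X is in use, otherwise a single
      * MSI or INTx interrupt.
      */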
2732 static int bnx2x_setup_irqs(struct bnx2x *bp)
2733 {
2734         int rc = 0;
2735         if (bp->flags & USING_MSIX_FLAG) {
2736                 rc = bnx2x_req_msix_irqs(bp);
2737                 if (rc)
2738                         return rc;
2739         } else {
2740                 bnx2x_ack_int(bp);
2741                 rc = bnx2x_req_irq(bp);
2742                 if (rc) {
2743                         BNX2X_ERR("IRQ request failed  rc %d, aborting\n", rc);
2744                         return rc;
2745                 }
2746                 if (bp->flags & USING_MSI_FLAG) {
2747                         bp->dev->irq = bp->pdev->irq;
2748                         netdev_info(bp->dev, "using MSI  IRQ %d\n",
2749                                bp->pdev->irq);
2750                 }
2751         }
2752
2753         return 0;
2754 }
2755
2756 void bnx2x_free_mem_bp(struct bnx2x *bp)
2757 {
2758         kfree(bp->fp);
2759         kfree(bp->msix_table);
2760         kfree(bp->ilt);
2761 }
2762
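     /* Allocate the per-device driver structures: the fastpath array, the
      * MSI-X table and the ILT.
      */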
2763 int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
2764 {
2765         struct bnx2x_fastpath *fp;
2766         struct msix_entry *tbl;
2767         struct bnx2x_ilt *ilt;
2768
2769         /* fp array */
2770         fp = kzalloc(L2_FP_COUNT(bp->l2_cid_count)*sizeof(*fp), GFP_KERNEL);
2771         if (!fp)
2772                 goto alloc_err;
2773         bp->fp = fp;
2774
2775         /* msix table */
2776         tbl = kzalloc((FP_SB_COUNT(bp->l2_cid_count) + 1) * sizeof(*tbl),
2777                                   GFP_KERNEL);
2778         if (!tbl)
2779                 goto alloc_err;
2780         bp->msix_table = tbl;
2781
2782         /* ilt */
2783         ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
2784         if (!ilt)
2785                 goto alloc_err;
2786         bp->ilt = ilt;
2787
2788         return 0;
2789 alloc_err:
2790         bnx2x_free_mem_bp(bp);
2791         return -ENOMEM;
2792
2793 }
2794
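     /* Unload and reload the NIC if the interface is currently running;
      * a no-op otherwise.
      */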
2795 static int bnx2x_reload_if_running(struct net_device *dev)
2796 {
2797         struct bnx2x *bp = netdev_priv(dev);
2798
2799         if (unlikely(!netif_running(dev)))
2800                 return 0;
2801
2802         bnx2x_nic_unload(bp, UNLOAD_NORMAL);
2803         return bnx2x_nic_load(bp, LOAD_NORMAL);
2804 }
2805
2806 /* called with rtnl_lock */
2807 int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
2808 {
2809         struct bnx2x *bp = netdev_priv(dev);
2810
2811         if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
2812                 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
2813                 return -EAGAIN;
2814         }
2815
2816         if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
2817             ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
2818                 return -EINVAL;
2819
2820         /* This does not race with packet allocation
2821          * because the actual alloc size is
2822          * only updated as part of load
2823          */
2824         dev->mtu = new_mtu;
2825
2826         return bnx2x_reload_if_running(dev);
2827 }
2828
2829 u32 bnx2x_fix_features(struct net_device *dev, u32 features)
2830 {
2831         struct bnx2x *bp = netdev_priv(dev);
2832
2833         /* TPA requires Rx CSUM offloading */
2834         if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa)
2835                 features &= ~NETIF_F_LRO;
2836
2837         return features;
2838 }
2839
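     /* Map the requested netdev features onto driver flags (LRO enables TPA,
      * NETIF_F_LOOPBACK selects BMAC loopback) and reload the NIC if
      * anything changed and no recovery is in progress.
      */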
2840 int bnx2x_set_features(struct net_device *dev, u32 features)
2841 {
2842         struct bnx2x *bp = netdev_priv(dev);
2843         u32 flags = bp->flags;
2844         bool bnx2x_reload = false;
2845
2846         if (features & NETIF_F_LRO)
2847                 flags |= TPA_ENABLE_FLAG;
2848         else
2849                 flags &= ~TPA_ENABLE_FLAG;
2850
2851         if (features & NETIF_F_LOOPBACK) {
2852                 if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
2853                         bp->link_params.loopback_mode = LOOPBACK_BMAC;
2854                         bnx2x_reload = true;
2855                 }
2856         } else {
2857                 if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
2858                         bp->link_params.loopback_mode = LOOPBACK_NONE;
2859                         bnx2x_reload = true;
2860                 }
2861         }
2862
2863         if (flags ^ bp->flags) {
2864                 bp->flags = flags;
2865                 bnx2x_reload = true;
2866         }
2867
2868         if (bnx2x_reload) {
2869                 if (bp->recovery_state == BNX2X_RECOVERY_DONE)
2870                         return bnx2x_reload_if_running(dev);
2871                 /* else: bnx2x_nic_load() will be called at end of recovery */
2872         }
2873
2874         return 0;
2875 }
2876
2877 void bnx2x_tx_timeout(struct net_device *dev)
2878 {
2879         struct bnx2x *bp = netdev_priv(dev);
2880
2881 #ifdef BNX2X_STOP_ON_ERROR
2882         if (!bp->panic)
2883                 bnx2x_panic();
2884 #endif
2885         /* This allows the netif to be shut down gracefully before resetting */
2886         schedule_delayed_work(&bp->reset_task, 0);
2887 }
2888
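     /* PM suspend handler: save the PCI state, detach the net device,
      * unload the NIC and move it to the requested power state.
      */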
2889 int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
2890 {
2891         struct net_device *dev = pci_get_drvdata(pdev);
2892         struct bnx2x *bp;
2893
2894         if (!dev) {
2895                 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
2896                 return -ENODEV;
2897         }
2898         bp = netdev_priv(dev);
2899
2900         rtnl_lock();
2901
2902         pci_save_state(pdev);
2903
2904         if (!netif_running(dev)) {
2905                 rtnl_unlock();
2906                 return 0;
2907         }
2908
2909         netif_device_detach(dev);
2910
2911         bnx2x_nic_unload(bp, UNLOAD_CLOSE);
2912
2913         bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
2914
2915         rtnl_unlock();
2916
2917         return 0;
2918 }
2919
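     /* PM resume handler: restore the PCI state, bring the device back to
      * D0, reattach the net device and reload the NIC.
      */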
2920 int bnx2x_resume(struct pci_dev *pdev)
2921 {
2922         struct net_device *dev = pci_get_drvdata(pdev);
2923         struct bnx2x *bp;
2924         int rc;
2925
2926         if (!dev) {
2927                 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
2928                 return -ENODEV;
2929         }
2930         bp = netdev_priv(dev);
2931
2932         if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
2933                 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
2934                 return -EAGAIN;
2935         }
2936
2937         rtnl_lock();
2938
2939         pci_restore_state(pdev);
2940
2941         if (!netif_running(dev)) {
2942                 rtnl_unlock();
2943                 return 0;
2944         }
2945
2946         bnx2x_set_power_state(bp, PCI_D0);
2947         netif_device_attach(dev);
2948
2949         /* Since the chip was reset, clear the FW sequence number */
2950         bp->fw_seq = 0;
2951         rc = bnx2x_nic_load(bp, LOAD_OPEN);
2952
2953         rtnl_unlock();
2954
2955         return rc;
2956 }