1 /* bnx2x_cmn.c: Broadcom Everest network driver.
2  *
3  * Copyright (c) 2007-2010 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10  * Written by: Eliezer Tamir
11  * Based on code from Michael Chan's bnx2 driver
12  * UDP CSUM errata workaround by Arik Gendelman
13  * Slowpath and fastpath rework by Vladislav Zolotarov
14  * Statistics and Link management by Yitchak Gertner
15  *
16  */
17
18 #include <linux/etherdevice.h>
19 #include <linux/if_vlan.h>
20 #include <linux/ip.h>
21 #include <net/ipv6.h>
22 #include <net/ip6_checksum.h>
23 #include <linux/firmware.h>
24 #include "bnx2x_cmn.h"
25
26 #include "bnx2x_init.h"
27
28 static int bnx2x_setup_irqs(struct bnx2x *bp);
29
30 /* free skb in the packet ring at pos idx
31  * return idx of last bd freed
32  */
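/* A transmitted packet occupies a short chain of BDs: a start BD (mapped with
 * dma_map_single), a parse BD carrying offload parsing information, an
 * optional "TSO split header" BD, and one data BD per page fragment (mapped
 * with dma_map_page). The walk below mirrors that layout: only the start BD
 * and the frag BDs are unmapped, since the parse/split-header BDs carry no
 * DMA mapping.
 */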
33 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
34                              u16 idx)
35 {
36         struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
37         struct eth_tx_start_bd *tx_start_bd;
38         struct eth_tx_bd *tx_data_bd;
39         struct sk_buff *skb = tx_buf->skb;
40         u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
41         int nbd;
42
43         /* prefetch skb end pointer to speed up dev_kfree_skb() */
44         prefetch(&skb->end);
45
46         DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
47            idx, tx_buf, skb);
48
49         /* unmap first bd */
50         DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
51         tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
52         dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
53                          BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);
54
55         nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
56 #ifdef BNX2X_STOP_ON_ERROR
57         if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
58                 BNX2X_ERR("BAD nbd!\n");
59                 bnx2x_panic();
60         }
61 #endif
62         new_cons = nbd + tx_buf->first_bd;
63
64         /* Get the next bd */
65         bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
66
67         /* Skip a parse bd... */
68         --nbd;
69         bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
70
71         /* ...and the TSO split header bd since they have no mapping */
72         if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
73                 --nbd;
74                 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
75         }
76
77         /* now free frags */
78         while (nbd > 0) {
79
80                 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
81                 tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
82                 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
83                                BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
84                 if (--nbd)
85                         bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
86         }
87
88         /* release skb */
89         WARN_ON(!skb);
90         dev_kfree_skb(skb);
91         tx_buf->first_bd = 0;
92         tx_buf->skb = NULL;
93
94         return new_cons;
95 }
96
97 int bnx2x_tx_int(struct bnx2x_fastpath *fp)
98 {
99         struct bnx2x *bp = fp->bp;
100         struct netdev_queue *txq;
101         u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
102
103 #ifdef BNX2X_STOP_ON_ERROR
104         if (unlikely(bp->panic))
105                 return -1;
106 #endif
107
108         txq = netdev_get_tx_queue(bp->dev, fp->index);
109         hw_cons = le16_to_cpu(*fp->tx_cons_sb);
110         sw_cons = fp->tx_pkt_cons;
111
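        /* hw_cons is the packet consumer index the FW posts in the status
         * block; sw_cons tracks what the driver has already cleaned, so the
         * loop below releases every packet completed since the last run.
         */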
112         while (sw_cons != hw_cons) {
113                 u16 pkt_cons;
114
115                 pkt_cons = TX_BD(sw_cons);
116
117                 DP(NETIF_MSG_TX_DONE, "queue[%d]: hw_cons %u  sw_cons %u "
118                                       " pkt_cons %u\n",
119                    fp->index, hw_cons, sw_cons, pkt_cons);
120
121                 bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
122                 sw_cons++;
123         }
124
125         fp->tx_pkt_cons = sw_cons;
126         fp->tx_bd_cons = bd_cons;
127
128         /* Need to make the tx_bd_cons update visible to start_xmit()
129          * before checking for netif_tx_queue_stopped().  Without the
130          * memory barrier, there is a small possibility that
131          * start_xmit() will miss it and cause the queue to be stopped
132          * forever.
133          */
134         smp_mb();
135
136         if (unlikely(netif_tx_queue_stopped(txq))) {
137                 /* Taking the tx_lock() is needed to prevent re-enabling the
138                  * queue while it's empty. This could happen if rx_action() gets
139                  * suspended in bnx2x_tx_int() after the condition before
140                  * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
141                  *
142                  * stops the queue->sees fresh tx_bd_cons->releases the queue->
143                  * sends some packets consuming the whole queue again->
144                  * stops the queue
145                  */
146
147                 __netif_tx_lock(txq, smp_processor_id());
148
149                 if ((netif_tx_queue_stopped(txq)) &&
150                     (bp->state == BNX2X_STATE_OPEN) &&
151                     (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
152                         netif_tx_wake_queue(txq);
153
154                 __netif_tx_unlock(txq);
155         }
156         return 0;
157 }
158
159 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
160                                              u16 idx)
161 {
162         u16 last_max = fp->last_max_sge;
163
164         if (SUB_S16(idx, last_max) > 0)
165                 fp->last_max_sge = idx;
166 }
167
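/* Walk the SGE mask after a TPA completion: clear the bits of the SGEs the FW
 * just consumed, then advance rx_sge_prod over every mask element that has
 * become fully used (all bits clear), re-arming those elements on the way.
 * The producer is only moved in whole mask-element steps of
 * RX_SGE_MASK_ELEM_SZ entries.
 */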
168 static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
169                                   struct eth_fast_path_rx_cqe *fp_cqe)
170 {
171         struct bnx2x *bp = fp->bp;
172         u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
173                                      le16_to_cpu(fp_cqe->len_on_bd)) >>
174                       SGE_PAGE_SHIFT;
175         u16 last_max, last_elem, first_elem;
176         u16 delta = 0;
177         u16 i;
178
179         if (!sge_len)
180                 return;
181
182         /* First mark all used pages */
183         for (i = 0; i < sge_len; i++)
184                 SGE_MASK_CLEAR_BIT(fp,
185                         RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[i])));
186
187         DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
188            sge_len - 1, le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));
189
190         /* Here we assume that the last SGE index is the biggest */
191         prefetch((void *)(fp->sge_mask));
192         bnx2x_update_last_max_sge(fp,
193                 le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));
194
195         last_max = RX_SGE(fp->last_max_sge);
196         last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
197         first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;
198
199         /* If ring is not full */
200         if (last_elem + 1 != first_elem)
201                 last_elem++;
202
203         /* Now update the prod */
204         for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
205                 if (likely(fp->sge_mask[i]))
206                         break;
207
208                 fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
209                 delta += RX_SGE_MASK_ELEM_SZ;
210         }
211
212         if (delta > 0) {
213                 fp->rx_sge_prod += delta;
214                 /* clear page-end entries */
215                 bnx2x_clear_sge_mask_next_elems(fp);
216         }
217
218         DP(NETIF_MSG_RX_STATUS,
219            "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
220            fp->last_max_sge, fp->rx_sge_prod);
221 }
222
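/* TPA (HW LRO) start: the CQE opens an aggregation on one of the per-queue
 * TPA bins. The spare skb sitting in tpa_pool[queue] is mapped and placed at
 * the producer slot so the RX ring stays full, while the skb that just
 * received the first segment is parked in the pool until the matching
 * TPA_STOP CQE arrives.
 */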
223 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
224                             struct sk_buff *skb, u16 cons, u16 prod)
225 {
226         struct bnx2x *bp = fp->bp;
227         struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
228         struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
229         struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
230         dma_addr_t mapping;
231
232         /* move empty skb from pool to prod and map it */
233         prod_rx_buf->skb = fp->tpa_pool[queue].skb;
234         mapping = dma_map_single(&bp->pdev->dev, fp->tpa_pool[queue].skb->data,
235                                  bp->rx_buf_size, DMA_FROM_DEVICE);
236         dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
237
238         /* move partial skb from cons to pool (don't unmap yet) */
239         fp->tpa_pool[queue] = *cons_rx_buf;
240
241         /* mark bin state as start - print error if current state != stop */
242         if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
243                 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
244
245         fp->tpa_state[queue] = BNX2X_TPA_START;
246
247         /* point prod_bd to new skb */
248         prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
249         prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
250
251 #ifdef BNX2X_STOP_ON_ERROR
252         fp->tpa_queue_used |= (1 << queue);
253 #ifdef _ASM_GENERIC_INT_L64_H
254         DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
255 #else
256         DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
257 #endif
258            fp->tpa_queue_used);
259 #endif
260 }
261
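/* Attach the SGE pages of an aggregated packet to the skb that holds its
 * linear part: each page taken from rx_page_ring becomes an skb fragment and
 * is immediately replaced by a freshly allocated page so the SGE ring stays
 * populated. gso_size is set so the stack can re-segment the aggregate when
 * forwarding.
 */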
262 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
263                                struct sk_buff *skb,
264                                struct eth_fast_path_rx_cqe *fp_cqe,
265                                u16 cqe_idx)
266 {
267         struct sw_rx_page *rx_pg, old_rx_pg;
268         u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
269         u32 i, frag_len, frag_size, pages;
270         int err;
271         int j;
272
273         frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
274         pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
275
276         /* This is needed in order to enable forwarding support */
277         if (frag_size)
278                 skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
279                                                max(frag_size, (u32)len_on_bd));
280
281 #ifdef BNX2X_STOP_ON_ERROR
282         if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
283                 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
284                           pages, cqe_idx);
285                 BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
286                           fp_cqe->pkt_len, len_on_bd);
287                 bnx2x_panic();
288                 return -EINVAL;
289         }
290 #endif
291
292         /* Run through the SGL and compose the fragmented skb */
293         for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
294                 u16 sge_idx =
295                         RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[j]));
296
297                 /* FW gives the indices of the SGE as if the ring is an array
298                    (meaning that "next" element will consume 2 indices) */
299                 frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
300                 rx_pg = &fp->rx_page_ring[sge_idx];
301                 old_rx_pg = *rx_pg;
302
303                 /* If we fail to allocate a substitute page, we simply stop
304                    where we are and drop the whole packet */
305                 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
306                 if (unlikely(err)) {
307                         fp->eth_q_stats.rx_skb_alloc_failed++;
308                         return err;
309                 }
310
311                 /* Unmap the page as we are going to pass it to the stack */
312                 dma_unmap_page(&bp->pdev->dev,
313                                dma_unmap_addr(&old_rx_pg, mapping),
314                                SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
315
316                 /* Add one frag and update the appropriate fields in the skb */
317                 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
318
319                 skb->data_len += frag_len;
320                 skb->truesize += frag_len;
321                 skb->len += frag_len;
322
323                 frag_size -= frag_len;
324         }
325
326         return 0;
327 }
328
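/* TPA stop: the aggregation is complete. Hand the accumulated skb to the
 * stack (recomputing the IP header checksum, since the header now describes
 * the aggregated frame) and refill the TPA bin with a new skb; if that
 * allocation fails, the packet is dropped and the old buffer stays in the
 * bin instead.
 */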
329 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
330                            u16 queue, int pad, int len, union eth_rx_cqe *cqe,
331                            u16 cqe_idx)
332 {
333         struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
334         struct sk_buff *skb = rx_buf->skb;
335         /* alloc new skb */
336         struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
337
338         /* Unmap skb in the pool anyway, as we are going to change
339            pool entry status to BNX2X_TPA_STOP even if new skb allocation
340            fails. */
341         dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
342                          bp->rx_buf_size, DMA_FROM_DEVICE);
343
344         if (likely(new_skb)) {
345                 /* fix ip xsum and give it to the stack */
346                 /* (no need to map the new skb) */
347
348                 prefetch(skb);
349                 prefetch(((char *)(skb)) + L1_CACHE_BYTES);
350
351 #ifdef BNX2X_STOP_ON_ERROR
352                 if (pad + len > bp->rx_buf_size) {
353                         BNX2X_ERR("skb_put is about to fail...  "
354                                   "pad %d  len %d  rx_buf_size %d\n",
355                                   pad, len, bp->rx_buf_size);
356                         bnx2x_panic();
357                         return;
358                 }
359 #endif
360
361                 skb_reserve(skb, pad);
362                 skb_put(skb, len);
363
364                 skb->protocol = eth_type_trans(skb, bp->dev);
365                 skb->ip_summed = CHECKSUM_UNNECESSARY;
366
367                 {
368                         struct iphdr *iph;
369
370                         iph = (struct iphdr *)skb->data;
371                         iph->check = 0;
372                         iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
373                 }
374
375                 if (!bnx2x_fill_frag_skb(bp, fp, skb,
376                                          &cqe->fast_path_cqe, cqe_idx)) {
377                         if ((le16_to_cpu(cqe->fast_path_cqe.
378                             pars_flags.flags) & PARSING_FLAGS_VLAN))
379                                 __vlan_hwaccel_put_tag(skb,
380                                                  le16_to_cpu(cqe->fast_path_cqe.
381                                                              vlan_tag));
382                         napi_gro_receive(&fp->napi, skb);
383                 } else {
384                         DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
385                            " - dropping packet!\n");
386                         dev_kfree_skb(skb);
387                 }
388
389
390                 /* put new skb in bin */
391                 fp->tpa_pool[queue].skb = new_skb;
392
393         } else {
394                 /* else drop the packet and keep the buffer in the bin */
395                 DP(NETIF_MSG_RX_STATUS,
396                    "Failed to allocate new skb - dropping packet!\n");
397                 fp->eth_q_stats.rx_skb_alloc_failed++;
398         }
399
400         fp->tpa_state[queue] = BNX2X_TPA_STOP;
401 }
402
403 /* Set Toeplitz hash value in the skb using the value from the
404  * CQE (calculated by HW).
405  */
406 static inline void bnx2x_set_skb_rxhash(struct bnx2x *bp, union eth_rx_cqe *cqe,
407                                         struct sk_buff *skb)
408 {
409         /* Set Toeplitz hash from CQE */
410         if ((bp->dev->features & NETIF_F_RXHASH) &&
411             (cqe->fast_path_cqe.status_flags &
412              ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
413                 skb->rxhash =
414                 le32_to_cpu(cqe->fast_path_cqe.rss_hash_result);
415 }
416
417 int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
418 {
419         struct bnx2x *bp = fp->bp;
420         u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
421         u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
422         int rx_pkt = 0;
423
424 #ifdef BNX2X_STOP_ON_ERROR
425         if (unlikely(bp->panic))
426                 return 0;
427 #endif
428
429         /* The CQ "next element" is the same size as a regular element,
430            which is why it is safe to step over it here */
431         hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
432         if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
433                 hw_comp_cons++;
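        /* The last slot on each RCQ page is a "next page" pointer rather than
         * a real completion; sw_comp_cons skips it via NEXT_RCQ_IDX(), so when
         * the HW index lands on such a slot it is stepped over here to keep
         * the two indices comparable.
         */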
434
435         bd_cons = fp->rx_bd_cons;
436         bd_prod = fp->rx_bd_prod;
437         bd_prod_fw = bd_prod;
438         sw_comp_cons = fp->rx_comp_cons;
439         sw_comp_prod = fp->rx_comp_prod;
440
441         /* Memory barrier necessary as speculative reads of the rx
442          * buffer can be ahead of the index in the status block
443          */
444         rmb();
445
446         DP(NETIF_MSG_RX_STATUS,
447            "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
448            fp->index, hw_comp_cons, sw_comp_cons);
449
450         while (sw_comp_cons != hw_comp_cons) {
451                 struct sw_rx_bd *rx_buf = NULL;
452                 struct sk_buff *skb;
453                 union eth_rx_cqe *cqe;
454                 u8 cqe_fp_flags;
455                 u16 len, pad;
456
457                 comp_ring_cons = RCQ_BD(sw_comp_cons);
458                 bd_prod = RX_BD(bd_prod);
459                 bd_cons = RX_BD(bd_cons);
460
461                 /* Prefetch the page containing the BD descriptor
462                    at producer's index. It will be needed when new skb is
463                    allocated */
464                 prefetch((void *)(PAGE_ALIGN((unsigned long)
465                                              (&fp->rx_desc_ring[bd_prod])) -
466                                   PAGE_SIZE + 1));
467
468                 cqe = &fp->rx_comp_ring[comp_ring_cons];
469                 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
470
471                 DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
472                    "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
473                    cqe_fp_flags, cqe->fast_path_cqe.status_flags,
474                    le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
475                    le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
476                    le16_to_cpu(cqe->fast_path_cqe.pkt_len));
477
478                 /* is this a slowpath msg? */
479                 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
480                         bnx2x_sp_event(fp, cqe);
481                         goto next_cqe;
482
483                 /* this is an rx packet */
484                 } else {
485                         rx_buf = &fp->rx_buf_ring[bd_cons];
486                         skb = rx_buf->skb;
487                         prefetch(skb);
488                         len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
489                         pad = cqe->fast_path_cqe.placement_offset;
490
491                         /* - If CQE is marked both TPA_START and TPA_END it is
492                          *   a non-TPA CQE.
493                          * - FP CQE will always have either TPA_START or/and
494                          *   TPA_STOP flags set.
495                          */
496                         if ((!fp->disable_tpa) &&
497                             (TPA_TYPE(cqe_fp_flags) !=
498                                         (TPA_TYPE_START | TPA_TYPE_END))) {
499                                 u16 queue = cqe->fast_path_cqe.queue_index;
500
501                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
502                                         DP(NETIF_MSG_RX_STATUS,
503                                            "calling tpa_start on queue %d\n",
504                                            queue);
505
506                                         bnx2x_tpa_start(fp, queue, skb,
507                                                         bd_cons, bd_prod);
508
509                                         /* Set Toeplitz hash for an LRO skb */
510                                         bnx2x_set_skb_rxhash(bp, cqe, skb);
511
512                                         goto next_rx;
513                                 } else { /* TPA_STOP */
514                                         DP(NETIF_MSG_RX_STATUS,
515                                            "calling tpa_stop on queue %d\n",
516                                            queue);
517
518                                         if (!BNX2X_RX_SUM_FIX(cqe))
519                                                 BNX2X_ERR("STOP on non-TCP "
520                                                           "data\n");
521
522                                         /* This is the size of the linear data
523                                            on this skb */
524                                         len = le16_to_cpu(cqe->fast_path_cqe.
525                                                                 len_on_bd);
526                                         bnx2x_tpa_stop(bp, fp, queue, pad,
527                                                     len, cqe, comp_ring_cons);
528 #ifdef BNX2X_STOP_ON_ERROR
529                                         if (bp->panic)
530                                                 return 0;
531 #endif
532
533                                         bnx2x_update_sge_prod(fp,
534                                                         &cqe->fast_path_cqe);
535                                         goto next_cqe;
536                                 }
537                         }
538
539                         dma_sync_single_for_device(&bp->pdev->dev,
540                                         dma_unmap_addr(rx_buf, mapping),
541                                                    pad + RX_COPY_THRESH,
542                                                    DMA_FROM_DEVICE);
543                         prefetch(((char *)(skb)) + L1_CACHE_BYTES);
544
545                         /* is this an error packet? */
546                         if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
547                                 DP(NETIF_MSG_RX_ERR,
548                                    "ERROR  flags %x  rx packet %u\n",
549                                    cqe_fp_flags, sw_comp_cons);
550                                 fp->eth_q_stats.rx_err_discard_pkt++;
551                                 goto reuse_rx;
552                         }
553
554                         /* Since we don't have a jumbo ring,
555                          * copy small packets if the MTU exceeds 1500
556                          */
557                         if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
558                             (len <= RX_COPY_THRESH)) {
559                                 struct sk_buff *new_skb;
560
561                                 new_skb = netdev_alloc_skb(bp->dev,
562                                                            len + pad);
563                                 if (new_skb == NULL) {
564                                         DP(NETIF_MSG_RX_ERR,
565                                            "ERROR  packet dropped "
566                                            "because of alloc failure\n");
567                                         fp->eth_q_stats.rx_skb_alloc_failed++;
568                                         goto reuse_rx;
569                                 }
570
571                                 /* aligned copy */
572                                 skb_copy_from_linear_data_offset(skb, pad,
573                                                     new_skb->data + pad, len);
574                                 skb_reserve(new_skb, pad);
575                                 skb_put(new_skb, len);
576
577                                 bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod);
578
579                                 skb = new_skb;
580
581                         } else
582                         if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
583                                 dma_unmap_single(&bp->pdev->dev,
584                                         dma_unmap_addr(rx_buf, mapping),
585                                                  bp->rx_buf_size,
586                                                  DMA_FROM_DEVICE);
587                                 skb_reserve(skb, pad);
588                                 skb_put(skb, len);
589
590                         } else {
591                                 DP(NETIF_MSG_RX_ERR,
592                                    "ERROR  packet dropped because "
593                                    "of alloc failure\n");
594                                 fp->eth_q_stats.rx_skb_alloc_failed++;
595 reuse_rx:
596                                 bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod);
597                                 goto next_rx;
598                         }
599
600                         skb->protocol = eth_type_trans(skb, bp->dev);
601
602                         /* Set Toeplitz hash for a non-LRO skb */
603                         bnx2x_set_skb_rxhash(bp, cqe, skb);
604
605                         skb_checksum_none_assert(skb);
606
607                         if (bp->rx_csum) {
608                                 if (likely(BNX2X_RX_CSUM_OK(cqe)))
609                                         skb->ip_summed = CHECKSUM_UNNECESSARY;
610                                 else
611                                         fp->eth_q_stats.hw_csum_err++;
612                         }
613                 }
614
615                 skb_record_rx_queue(skb, fp->index);
616
617                 if (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
618                      PARSING_FLAGS_VLAN)
619                         __vlan_hwaccel_put_tag(skb,
620                                 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
621                 napi_gro_receive(&fp->napi, skb);
622
623
624 next_rx:
625                 rx_buf->skb = NULL;
626
627                 bd_cons = NEXT_RX_IDX(bd_cons);
628                 bd_prod = NEXT_RX_IDX(bd_prod);
629                 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
630                 rx_pkt++;
631 next_cqe:
632                 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
633                 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
634
635                 if (rx_pkt == budget)
636                         break;
637         } /* while */
638
639         fp->rx_bd_cons = bd_cons;
640         fp->rx_bd_prod = bd_prod_fw;
641         fp->rx_comp_cons = sw_comp_cons;
642         fp->rx_comp_prod = sw_comp_prod;
643
644         /* Update producers */
645         bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
646                              fp->rx_sge_prod);
647
648         fp->rx_pkt += rx_pkt;
649         fp->rx_calls++;
650
651         return rx_pkt;
652 }
653
654 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
655 {
656         struct bnx2x_fastpath *fp = fp_cookie;
657         struct bnx2x *bp = fp->bp;
658
659         /* Return here if interrupt is disabled */
660         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
661                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
662                 return IRQ_HANDLED;
663         }
664
665         DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB "
666                          "[fp %d fw_sd %d igusb %d]\n",
667            fp->index, fp->fw_sb_id, fp->igu_sb_id);
668         bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
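        /* The ack above disarms this status block's interrupt
         * (IGU_INT_DISABLE); it is re-armed from the NAPI poll routine once
         * the pending work has been processed, which is the usual NAPI
         * interrupt-mitigation pattern.
         */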
669
670 #ifdef BNX2X_STOP_ON_ERROR
671         if (unlikely(bp->panic))
672                 return IRQ_HANDLED;
673 #endif
674
675         /* Handle Rx and Tx according to MSI-X vector */
676         prefetch(fp->rx_cons_sb);
677         prefetch(fp->tx_cons_sb);
678         prefetch(&fp->sb_running_index[SM_RX_ID]);
679         napi_schedule(&bnx2x_fp(bp, fp->index, napi));
680
681         return IRQ_HANDLED;
682 }
683
684 /* HW Lock for shared dual port PHYs */
685 void bnx2x_acquire_phy_lock(struct bnx2x *bp)
686 {
687         mutex_lock(&bp->port.phy_mutex);
688
689         if (bp->port.need_hw_lock)
690                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
691 }
692
693 void bnx2x_release_phy_lock(struct bnx2x *bp)
694 {
695         if (bp->port.need_hw_lock)
696                 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
697
698         mutex_unlock(&bp->port.phy_mutex);
699 }
700
701 void bnx2x_link_report(struct bnx2x *bp)
702 {
703         if (bp->flags & MF_FUNC_DIS) {
704                 netif_carrier_off(bp->dev);
705                 netdev_err(bp->dev, "NIC Link is Down\n");
706                 return;
707         }
708
709         if (bp->link_vars.link_up) {
710                 u16 line_speed;
711
712                 if (bp->state == BNX2X_STATE_OPEN)
713                         netif_carrier_on(bp->dev);
714                 netdev_info(bp->dev, "NIC Link is Up, ");
715
716                 line_speed = bp->link_vars.line_speed;
717                 if (IS_MF(bp)) {
718                         u16 vn_max_rate;
719
720                         vn_max_rate =
721                                 ((bp->mf_config[BP_VN(bp)] &
722                                   FUNC_MF_CFG_MAX_BW_MASK) >>
723                                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
724                         if (vn_max_rate < line_speed)
725                                 line_speed = vn_max_rate;
726                 }
727                 pr_cont("%d Mbps ", line_speed);
728
729                 if (bp->link_vars.duplex == DUPLEX_FULL)
730                         pr_cont("full duplex");
731                 else
732                         pr_cont("half duplex");
733
734                 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
735                         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
736                                 pr_cont(", receive ");
737                                 if (bp->link_vars.flow_ctrl &
738                                     BNX2X_FLOW_CTRL_TX)
739                                         pr_cont("& transmit ");
740                         } else {
741                                 pr_cont(", transmit ");
742                         }
743                         pr_cont("flow control ON");
744                 }
745                 pr_cont("\n");
746
747         } else { /* link_down */
748                 netif_carrier_off(bp->dev);
749                 netdev_err(bp->dev, "NIC Link is Down\n");
750         }
751 }
752
753 /* Returns the number of actually allocated BDs */
754 static inline int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
755                                       int rx_ring_size)
756 {
757         struct bnx2x *bp = fp->bp;
758         u16 ring_prod, cqe_ring_prod;
759         int i;
760
761         fp->rx_comp_cons = 0;
762         cqe_ring_prod = ring_prod = 0;
763         for (i = 0; i < rx_ring_size; i++) {
764                 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
765                         BNX2X_ERR("was only able to allocate "
766                                   "%d rx skbs on queue[%d]\n", i, fp->index);
767                         fp->eth_q_stats.rx_skb_alloc_failed++;
768                         break;
769                 }
770                 ring_prod = NEXT_RX_IDX(ring_prod);
771                 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
772                 WARN_ON(ring_prod <= i);
773         }
774
775         fp->rx_bd_prod = ring_prod;
776         /* Limit the CQE producer by the CQE ring size */
777         fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
778                                cqe_ring_prod);
779         fp->rx_pkt = fp->rx_calls = 0;
780
781         return i;
782 }
783
784 static inline void bnx2x_alloc_rx_bd_ring(struct bnx2x_fastpath *fp)
785 {
786         struct bnx2x *bp = fp->bp;
787         int rx_ring_size = bp->rx_ring_size ? bp->rx_ring_size :
788                                               MAX_RX_AVAIL/bp->num_queues;
789
790         rx_ring_size = max_t(int, MIN_RX_AVAIL, rx_ring_size);
791
792         bnx2x_alloc_rx_bds(fp, rx_ring_size);
793
794         /* Warning!
795          * This will generate an interrupt (to the TSTORM);
796          * it must only be done after the chip is initialized.
797          */
798         bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
799                              fp->rx_sge_prod);
800 }
801
802 void bnx2x_init_rx_rings(struct bnx2x *bp)
803 {
804         int func = BP_FUNC(bp);
805         int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
806                                               ETH_MAX_AGGREGATION_QUEUES_E1H;
807         u16 ring_prod;
808         int i, j;
809
810         bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN +
811                 IP_HEADER_ALIGNMENT_PADDING;
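        /* The buffer size computed above must hold a full frame: the MTU plus
         * the Ethernet header/FCS overhead, plus the alignment slack and the
         * IP-header alignment padding the HW placement offset may add.
         */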
812
813         DP(NETIF_MSG_IFUP,
814            "mtu %d  rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
815
816         for_each_queue(bp, j) {
817                 struct bnx2x_fastpath *fp = &bp->fp[j];
818
819                 if (!fp->disable_tpa) {
820                         for (i = 0; i < max_agg_queues; i++) {
821                                 fp->tpa_pool[i].skb =
822                                    netdev_alloc_skb(bp->dev, bp->rx_buf_size);
823                                 if (!fp->tpa_pool[i].skb) {
824                                         BNX2X_ERR("Failed to allocate TPA "
825                                                   "skb pool for queue[%d] - "
826                                                   "disabling TPA on this "
827                                                   "queue!\n", j);
828                                         bnx2x_free_tpa_pool(bp, fp, i);
829                                         fp->disable_tpa = 1;
830                                         break;
831                                 }
832                                 dma_unmap_addr_set((struct sw_rx_bd *)
833                                                         &bp->fp->tpa_pool[i],
834                                                    mapping, 0);
835                                 fp->tpa_state[i] = BNX2X_TPA_STOP;
836                         }
837
838                         /* "next page" elements initialization */
839                         bnx2x_set_next_page_sgl(fp);
840
841                         /* set SGEs bit mask */
842                         bnx2x_init_sge_ring_bit_mask(fp);
843
844                         /* Allocate SGEs and initialize the ring elements */
845                         for (i = 0, ring_prod = 0;
846                              i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
847
848                                 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
849                                         BNX2X_ERR("was only able to allocate "
850                                                   "%d rx sges\n", i);
851                                         BNX2X_ERR("disabling TPA for"
852                                                   " queue[%d]\n", j);
853                                         /* Cleanup already allocated elements */
854                                         bnx2x_free_rx_sge_range(bp,
855                                                                 fp, ring_prod);
856                                         bnx2x_free_tpa_pool(bp,
857                                                             fp, max_agg_queues);
858                                         fp->disable_tpa = 1;
859                                         ring_prod = 0;
860                                         break;
861                                 }
862                                 ring_prod = NEXT_SGE_IDX(ring_prod);
863                         }
864
865                         fp->rx_sge_prod = ring_prod;
866                 }
867         }
868
869         for_each_queue(bp, j) {
870                 struct bnx2x_fastpath *fp = &bp->fp[j];
871
872                 fp->rx_bd_cons = 0;
873
874                 bnx2x_set_next_page_rx_bd(fp);
875
876                 /* CQ ring */
877                 bnx2x_set_next_page_rx_cq(fp);
878
879                 /* Allocate BDs and initialize BD ring */
880                 bnx2x_alloc_rx_bd_ring(fp);
881
882                 if (j != 0)
883                         continue;
884
885                 if (!CHIP_IS_E2(bp)) {
886                         REG_WR(bp, BAR_USTRORM_INTMEM +
887                                USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
888                                U64_LO(fp->rx_comp_mapping));
889                         REG_WR(bp, BAR_USTRORM_INTMEM +
890                                USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
891                                U64_HI(fp->rx_comp_mapping));
892                 }
893         }
894 }
895
896 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
897 {
898         int i;
899
900         for_each_queue(bp, i) {
901                 struct bnx2x_fastpath *fp = &bp->fp[i];
902
903                 u16 bd_cons = fp->tx_bd_cons;
904                 u16 sw_prod = fp->tx_pkt_prod;
905                 u16 sw_cons = fp->tx_pkt_cons;
906
907                 while (sw_cons != sw_prod) {
908                         bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
909                         sw_cons++;
910                 }
911         }
912 }
913
914 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
915 {
916         int i, j;
917
918         for_each_queue(bp, j) {
919                 struct bnx2x_fastpath *fp = &bp->fp[j];
920
921                 for (i = 0; i < NUM_RX_BD; i++) {
922                         struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
923                         struct sk_buff *skb = rx_buf->skb;
924
925                         if (skb == NULL)
926                                 continue;
927
928                         dma_unmap_single(&bp->pdev->dev,
929                                          dma_unmap_addr(rx_buf, mapping),
930                                          bp->rx_buf_size, DMA_FROM_DEVICE);
931
932                         rx_buf->skb = NULL;
933                         dev_kfree_skb(skb);
934                 }
935                 if (!fp->disable_tpa)
936                         bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
937                                             ETH_MAX_AGGREGATION_QUEUES_E1 :
938                                             ETH_MAX_AGGREGATION_QUEUES_E1H);
939         }
940 }
941
942 void bnx2x_free_skbs(struct bnx2x *bp)
943 {
944         bnx2x_free_tx_skbs(bp);
945         bnx2x_free_rx_skbs(bp);
946 }
947
948 static void bnx2x_free_msix_irqs(struct bnx2x *bp)
949 {
950         int i, offset = 1;
951
952         free_irq(bp->msix_table[0].vector, bp->dev);
953         DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
954            bp->msix_table[0].vector);
955
956 #ifdef BCM_CNIC
957         offset++;
958 #endif
959         for_each_queue(bp, i) {
960                 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq  "
961                    "state %x\n", i, bp->msix_table[i + offset].vector,
962                    bnx2x_fp(bp, i, state));
963
964                 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
965         }
966 }
967
968 void bnx2x_free_irq(struct bnx2x *bp)
969 {
970         if (bp->flags & USING_MSIX_FLAG)
971                 bnx2x_free_msix_irqs(bp);
972         else if (bp->flags & USING_MSI_FLAG)
973                 free_irq(bp->pdev->irq, bp->dev);
974         else
975                 free_irq(bp->pdev->irq, bp->dev);
976 }
977
978 int bnx2x_enable_msix(struct bnx2x *bp)
979 {
980         int msix_vec = 0, i, rc, req_cnt;
981
982         bp->msix_table[msix_vec].entry = msix_vec;
983         DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n",
984            bp->msix_table[0].entry);
985         msix_vec++;
986
987 #ifdef BCM_CNIC
988         bp->msix_table[msix_vec].entry = msix_vec;
989         DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d (CNIC)\n",
990            bp->msix_table[msix_vec].entry, bp->msix_table[msix_vec].entry);
991         msix_vec++;
992 #endif
993         for_each_queue(bp, i) {
994                 bp->msix_table[msix_vec].entry = msix_vec;
995                 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
996                    "(fastpath #%u)\n", msix_vec, msix_vec, i);
997                 msix_vec++;
998         }
999
1000         req_cnt = BNX2X_NUM_QUEUES(bp) + CNIC_CONTEXT_USE + 1;
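        /* Vector layout: entry 0 is the slowpath interrupt, optionally
         * followed by a CNIC entry (when BCM_CNIC is set), then one entry per
         * fastpath queue - hence num_queues + CNIC_CONTEXT_USE + 1 requested
         * vectors.
         */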
1001
1002         rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], req_cnt);
1003
1004         /*
1005          * reconfigure number of tx/rx queues according to available
1006          * MSI-X vectors
1007          */
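        /* With this legacy pci_enable_msix() API, a positive return value
         * means the request could not be granted and reports how many vectors
         * are actually available; retry with that count and shrink the number
         * of queues by the difference.
         */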
1008         if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
1009                 /* how many fewer vectors did we actually get? */
1010                 int diff = req_cnt - rc;
1011
1012                 DP(NETIF_MSG_IFUP,
1013                    "Trying to use less MSI-X vectors: %d\n", rc);
1014
1015                 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
1016
1017                 if (rc) {
1018                         DP(NETIF_MSG_IFUP,
1019                            "MSI-X is not attainable  rc %d\n", rc);
1020                         return rc;
1021                 }
1022                 /*
1023                  * decrease number of queues by number of unallocated entries
1024                  */
1025                 bp->num_queues -= diff;
1026
1027                 DP(NETIF_MSG_IFUP, "New queue configuration set: %d\n",
1028                                   bp->num_queues);
1029         } else if (rc) {
1030                 /* fall back to INTx if there is not enough memory */
1031                 if (rc == -ENOMEM)
1032                         bp->flags |= DISABLE_MSI_FLAG;
1033                 DP(NETIF_MSG_IFUP, "MSI-X is not attainable  rc %d\n", rc);
1034                 return rc;
1035         }
1036
1037         bp->flags |= USING_MSIX_FLAG;
1038
1039         return 0;
1040 }
1041
1042 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1043 {
1044         int i, rc, offset = 1;
1045
1046         rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
1047                          bp->dev->name, bp->dev);
1048         if (rc) {
1049                 BNX2X_ERR("request sp irq failed\n");
1050                 return -EBUSY;
1051         }
1052
1053 #ifdef BCM_CNIC
1054         offset++;
1055 #endif
1056         for_each_queue(bp, i) {
1057                 struct bnx2x_fastpath *fp = &bp->fp[i];
1058                 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1059                          bp->dev->name, i);
1060
1061                 rc = request_irq(bp->msix_table[offset].vector,
1062                                  bnx2x_msix_fp_int, 0, fp->name, fp);
1063                 if (rc) {
1064                         BNX2X_ERR("request fp #%d irq failed  rc %d\n", i, rc);
1065                         bnx2x_free_msix_irqs(bp);
1066                         return -EBUSY;
1067                 }
1068
1069                 offset++;
1070                 fp->state = BNX2X_FP_STATE_IRQ;
1071         }
1072
1073         i = BNX2X_NUM_QUEUES(bp);
1074         offset = 1 + CNIC_CONTEXT_USE;
1075         netdev_info(bp->dev, "using MSI-X  IRQs: sp %d  fp[%d] %d"
1076                " ... fp[%d] %d\n",
1077                bp->msix_table[0].vector,
1078                0, bp->msix_table[offset].vector,
1079                i - 1, bp->msix_table[offset + i - 1].vector);
1080
1081         return 0;
1082 }
1083
1084 int bnx2x_enable_msi(struct bnx2x *bp)
1085 {
1086         int rc;
1087
1088         rc = pci_enable_msi(bp->pdev);
1089         if (rc) {
1090                 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
1091                 return -1;
1092         }
1093         bp->flags |= USING_MSI_FLAG;
1094
1095         return 0;
1096 }
1097
1098 static int bnx2x_req_irq(struct bnx2x *bp)
1099 {
1100         unsigned long flags;
1101         int rc;
1102
1103         if (bp->flags & USING_MSI_FLAG)
1104                 flags = 0;
1105         else
1106                 flags = IRQF_SHARED;
1107
1108         rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
1109                          bp->dev->name, bp->dev);
1110         if (!rc)
1111                 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
1112
1113         return rc;
1114 }
1115
1116 static void bnx2x_napi_enable(struct bnx2x *bp)
1117 {
1118         int i;
1119
1120         for_each_queue(bp, i)
1121                 napi_enable(&bnx2x_fp(bp, i, napi));
1122 }
1123
1124 static void bnx2x_napi_disable(struct bnx2x *bp)
1125 {
1126         int i;
1127
1128         for_each_queue(bp, i)
1129                 napi_disable(&bnx2x_fp(bp, i, napi));
1130 }
1131
1132 void bnx2x_netif_start(struct bnx2x *bp)
1133 {
1134         int intr_sem;
1135
1136         intr_sem = atomic_dec_and_test(&bp->intr_sem);
1137         smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
1138
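        /* atomic_dec_and_test() returns true only when intr_sem has dropped
         * to zero, i.e. no other path still holds interrupts disabled; only
         * then may NAPI and HW interrupts be turned back on.
         */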
1139         if (intr_sem) {
1140                 if (netif_running(bp->dev)) {
1141                         bnx2x_napi_enable(bp);
1142                         bnx2x_int_enable(bp);
1143                         if (bp->state == BNX2X_STATE_OPEN)
1144                                 netif_tx_wake_all_queues(bp->dev);
1145                 }
1146         }
1147 }
1148
1149 void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1150 {
1151         bnx2x_int_disable_sync(bp, disable_hw);
1152         bnx2x_napi_disable(bp);
1153         netif_tx_disable(bp->dev);
1154 }
1155
1156 void bnx2x_set_num_queues(struct bnx2x *bp)
1157 {
1158         switch (bp->multi_mode) {
1159         case ETH_RSS_MODE_DISABLED:
1160                 bp->num_queues = 1;
1161                 break;
1162         case ETH_RSS_MODE_REGULAR:
1163                 bp->num_queues = bnx2x_calc_num_queues(bp);
1164                 break;
1165
1166         default:
1167                 bp->num_queues = 1;
1168                 break;
1169         }
1170 }
1171
1172 static void bnx2x_release_firmware(struct bnx2x *bp)
1173 {
1174         kfree(bp->init_ops_offsets);
1175         kfree(bp->init_ops);
1176         kfree(bp->init_data);
1177         release_firmware(bp->firmware);
1178 }
1179
1180 /* must be called with rtnl_lock */
1181 int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
1182 {
1183         u32 load_code;
1184         int i, rc;
1185
1186         /* Set init arrays */
1187         rc = bnx2x_init_firmware(bp);
1188         if (rc) {
1189                 BNX2X_ERR("Error loading firmware\n");
1190                 return rc;
1191         }
1192
1193 #ifdef BNX2X_STOP_ON_ERROR
1194         if (unlikely(bp->panic))
1195                 return -EPERM;
1196 #endif
1197
1198         bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
1199
1200         /* must be called before memory allocation and HW init */
1201         bnx2x_ilt_set_info(bp);
1202
1203         if (bnx2x_alloc_mem(bp))
1204                 return -ENOMEM;
1205
1206         netif_set_real_num_tx_queues(bp->dev, bp->num_queues);
1207         rc = netif_set_real_num_rx_queues(bp->dev, bp->num_queues);
1208         if (rc) {
1209                 BNX2X_ERR("Unable to update real_num_rx_queues\n");
1210                 goto load_error0;
1211         }
1212
1213         for_each_queue(bp, i)
1214                 bnx2x_fp(bp, i, disable_tpa) =
1215                                         ((bp->flags & TPA_ENABLE_FLAG) == 0);
1216
1217         bnx2x_napi_enable(bp);
1218
1219         /* Send the LOAD_REQUEST command to the MCP.
1220            The response indicates which type of LOAD to perform:
1221            if this is the first port to be initialized, the
1222            common blocks must be initialized as well, otherwise not.
1223         */
1224         if (!BP_NOMCP(bp)) {
1225                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0);
1226                 if (!load_code) {
1227                         BNX2X_ERR("MCP response failure, aborting\n");
1228                         rc = -EBUSY;
1229                         goto load_error1;
1230                 }
1231                 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
1232                         rc = -EBUSY; /* other port in diagnostic mode */
1233                         goto load_error1;
1234                 }
1235
1236         } else {
1237                 int path = BP_PATH(bp);
1238                 int port = BP_PORT(bp);
1239
1240                 DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d]      %d, %d, %d\n",
1241                    path, load_count[path][0], load_count[path][1],
1242                    load_count[path][2]);
1243                 load_count[path][0]++;
1244                 load_count[path][1 + port]++;
1245                 DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d]  %d, %d, %d\n",
1246                    path, load_count[path][0], load_count[path][1],
1247                    load_count[path][2]);
1248                 if (load_count[path][0] == 1)
1249                         load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
1250                 else if (load_count[path][1 + port] == 1)
1251                         load_code = FW_MSG_CODE_DRV_LOAD_PORT;
1252                 else
1253                         load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
1254         }
1255
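        /* Whichever function performed the COMMON or PORT initialization acts
         * as the PMF (port management function) and takes on the per-port
         * duties such as PHY/link handling (see bnx2x_initial_phy_init below);
         * the others load as plain functions.
         */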
1256         if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
1257             (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
1258             (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
1259                 bp->port.pmf = 1;
1260         else
1261                 bp->port.pmf = 0;
1262         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
1263
1264         /* Initialize HW */
1265         rc = bnx2x_init_hw(bp, load_code);
1266         if (rc) {
1267                 BNX2X_ERR("HW init failed, aborting\n");
1268                 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
1269                 goto load_error2;
1270         }
1271
1272         /* Connect to IRQs */
1273         rc = bnx2x_setup_irqs(bp);
1274         if (rc) {
1275                 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
1276                 goto load_error2;
1277         }
1278
1279         /* Setup NIC internals and enable interrupts */
1280         bnx2x_nic_init(bp, load_code);
1281
1282         if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
1283             (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
1284             (bp->common.shmem2_base))
1285                 SHMEM2_WR(bp, dcc_support,
1286                           (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
1287                            SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
1288
1289         /* Send LOAD_DONE command to MCP */
1290         if (!BP_NOMCP(bp)) {
1291                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
1292                 if (!load_code) {
1293                         BNX2X_ERR("MCP response failure, aborting\n");
1294                         rc = -EBUSY;
1295                         goto load_error3;
1296                 }
1297         }
1298
1299         bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
1300
1301         rc = bnx2x_func_start(bp);
1302         if (rc) {
1303                 BNX2X_ERR("Function start failed!\n");
1304 #ifndef BNX2X_STOP_ON_ERROR
1305                 goto load_error3;
1306 #else
1307                 bp->panic = 1;
1308                 return -EBUSY;
1309 #endif
1310         }
1311
1312         rc = bnx2x_setup_client(bp, &bp->fp[0], 1 /* Leading */);
1313         if (rc) {
1314                 BNX2X_ERR("Setup leading failed!\n");
1315 #ifndef BNX2X_STOP_ON_ERROR
1316                 goto load_error3;
1317 #else
1318                 bp->panic = 1;
1319                 return -EBUSY;
1320 #endif
1321         }
1322
1323         if (!CHIP_IS_E1(bp) &&
1324             (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED)) {
1325                 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
1326                 bp->flags |= MF_FUNC_DIS;
1327         }
1328
1329 #ifdef BCM_CNIC
1330         /* Enable Timer scan */
1331         REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
1332 #endif
1333
1334         for_each_nondefault_queue(bp, i) {
1335                 rc = bnx2x_setup_client(bp, &bp->fp[i], 0);
1336                 if (rc)
1337 #ifdef BCM_CNIC
1338                         goto load_error4;
1339 #else
1340                         goto load_error3;
1341 #endif
1342         }
1343
1344         /* Now that the clients are configured we are ready to work */
1345         bp->state = BNX2X_STATE_OPEN;
1346
1347         bnx2x_set_eth_mac(bp, 1);
1348
1349         if (bp->port.pmf)
1350                 bnx2x_initial_phy_init(bp, load_mode);
1351
1352         /* Start fast path */
1353         switch (load_mode) {
1354         case LOAD_NORMAL:
1355                 /* Tx queues should only be re-enabled */
1356                 netif_tx_wake_all_queues(bp->dev);
1357                 /* Initialize the receive filter. */
1358                 bnx2x_set_rx_mode(bp->dev);
1359                 break;
1360
1361         case LOAD_OPEN:
1362                 netif_tx_start_all_queues(bp->dev);
1363                 smp_mb__after_clear_bit();
1364                 /* Initialize the receive filter. */
1365                 bnx2x_set_rx_mode(bp->dev);
1366                 break;
1367
1368         case LOAD_DIAG:
1369                 /* Initialize the receive filter. */
1370                 bnx2x_set_rx_mode(bp->dev);
1371                 bp->state = BNX2X_STATE_DIAG;
1372                 break;
1373
1374         default:
1375                 break;
1376         }
1377
1378         if (!bp->port.pmf)
1379                 bnx2x__link_status_update(bp);
1380
1381         /* start the timer */
1382         mod_timer(&bp->timer, jiffies + bp->current_interval);
1383
1384 #ifdef BCM_CNIC
1385         bnx2x_setup_cnic_irq_info(bp);
1386         if (bp->state == BNX2X_STATE_OPEN)
1387                 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
1388 #endif
1389         bnx2x_inc_load_cnt(bp);
1390
1391         bnx2x_release_firmware(bp);
1392
1393         return 0;
1394
1395 #ifdef BCM_CNIC
1396 load_error4:
1397         /* Disable Timer scan */
1398         REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0);
1399 #endif
1400 load_error3:
1401         bnx2x_int_disable_sync(bp, 1);
1402
1403         /* Free SKBs, SGEs, TPA pool and driver internals */
1404         bnx2x_free_skbs(bp);
1405         for_each_queue(bp, i)
1406                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
1407
1408         /* Release IRQs */
1409         bnx2x_free_irq(bp);
1410 load_error2:
1411         if (!BP_NOMCP(bp)) {
1412                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
1413                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
1414         }
1415
1416         bp->port.pmf = 0;
1417 load_error1:
1418         bnx2x_napi_disable(bp);
1419 load_error0:
1420         bnx2x_free_mem(bp);
1421
1422         bnx2x_release_firmware(bp);
1423
1424         return rc;
1425 }
1426
1427 /* must be called with rtnl_lock */
1428 int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
1429 {
1430         int i;
1431
1432         if (bp->state == BNX2X_STATE_CLOSED) {
1433                 /* Interface has been removed - nothing to recover */
1434                 bp->recovery_state = BNX2X_RECOVERY_DONE;
1435                 bp->is_leader = 0;
1436                 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
1437                 smp_wmb();
1438
1439                 return -EINVAL;
1440         }
1441
1442 #ifdef BCM_CNIC
1443         bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
1444 #endif
1445         bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
1446
1447         /* Set "drop all" */
1448         bp->rx_mode = BNX2X_RX_MODE_NONE;
1449         bnx2x_set_storm_rx_mode(bp);
1450
1451         /* Stop Tx */
1452         bnx2x_tx_disable(bp);
1453
1454         del_timer_sync(&bp->timer);
1455
1456         SHMEM_WR(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb,
1457                  (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
1458
1459         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
1460
1461         /* Cleanup the chip if needed */
1462         if (unload_mode != UNLOAD_RECOVERY)
1463                 bnx2x_chip_cleanup(bp, unload_mode);
1464         else {
1465                 /* Disable HW interrupts, NAPI and Tx */
1466                 bnx2x_netif_stop(bp, 1);
1467
1468                 /* Release IRQs */
1469                 bnx2x_free_irq(bp);
1470         }
1471
1472         bp->port.pmf = 0;
1473
1474         /* Free SKBs, SGEs, TPA pool and driver internals */
1475         bnx2x_free_skbs(bp);
1476         for_each_queue(bp, i)
1477                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
1478
1479         bnx2x_free_mem(bp);
1480
1481         bp->state = BNX2X_STATE_CLOSED;
1482
1483         /* The last driver must disable the "close the gate" functionality
1484          * if there is no parity attention or "process kill" pending.
1485          */
1486         if ((!bnx2x_dec_load_cnt(bp)) && (!bnx2x_chk_parity_attn(bp)) &&
1487             bnx2x_reset_is_done(bp))
1488                 bnx2x_disable_close_the_gate(bp);
1489
1490         /* Reset the MCP mailbox sequence if there is an ongoing recovery */
1491         if (unload_mode == UNLOAD_RECOVERY)
1492                 bp->fw_seq = 0;
1493
1494         return 0;
1495 }
1496
1497 int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
1498 {
1499         u16 pmcsr;
1500
1501         /* If there is no power capability, silently succeed */
1502         if (!bp->pm_cap) {
1503                 DP(NETIF_MSG_HW, "No power capability. Breaking.\n");
1504                 return 0;
1505         }
1506
1507         pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
1508
1509         switch (state) {
1510         case PCI_D0:
1511                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
1512                                       ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
1513                                        PCI_PM_CTRL_PME_STATUS));
1514
1515                 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
1516                         /* delay required during transition out of D3hot */
1517                         msleep(20);
1518                 break;
1519
1520         case PCI_D3hot:
1521                 /* If there are other clients above, don't
1522                    shut down the power */
1523                 if (atomic_read(&bp->pdev->enable_cnt) != 1)
1524                         return 0;
1525                 /* Don't shut down the power for emulation and FPGA */
1526                 if (CHIP_REV_IS_SLOW(bp))
1527                         return 0;
1528
1529                 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
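                /* 3 is the encoding of D3hot in the PM_CTRL state field */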
1530                 pmcsr |= 3;
1531
1532                 if (bp->wol)
1533                         pmcsr |= PCI_PM_CTRL_PME_ENABLE;
1534
1535                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
1536                                       pmcsr);
1537
1538                 /* No more memory access after this point until
1539                  * the device is brought back to D0.
1540                  */
1541                 break;
1542
1543         default:
1544                 return -EINVAL;
1545         }
1546         return 0;
1547 }
1548
1549 /*
1550  * net_device service functions
1551  */
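/* NAPI poll handler: service Tx completions and Rx work until either the
 * budget is exhausted or both rings are idle, then re-arm the status block
 * interrupt via bnx2x_ack_sb().
 */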
1552 int bnx2x_poll(struct napi_struct *napi, int budget)
1553 {
1554         int work_done = 0;
1555         struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
1556                                                  napi);
1557         struct bnx2x *bp = fp->bp;
1558
1559         while (1) {
1560 #ifdef BNX2X_STOP_ON_ERROR
1561                 if (unlikely(bp->panic)) {
1562                         napi_complete(napi);
1563                         return 0;
1564                 }
1565 #endif
1566
1567                 if (bnx2x_has_tx_work(fp))
1568                         bnx2x_tx_int(fp);
1569
1570                 if (bnx2x_has_rx_work(fp)) {
1571                         work_done += bnx2x_rx_int(fp, budget - work_done);
1572
1573                         /* must not complete if we consumed full budget */
1574                         if (work_done >= budget)
1575                                 break;
1576                 }
1577
1578                 /* Fall out from the NAPI loop if needed */
1579                 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
1580                         bnx2x_update_fpsb_idx(fp);
1581                         /* bnx2x_has_rx_work() reads the status block,
1582                          * thus we need to ensure that status block indices
1583                          * have been actually read (bnx2x_update_fpsb_idx)
1584                          * prior to this check (bnx2x_has_rx_work) so that
1585                          * we won't write the "newer" value of the status block
1586                          * to IGU (if there was a DMA right after
1587                          * bnx2x_has_rx_work and if there is no rmb, the memory
1588                          * reading (bnx2x_update_fpsb_idx) may be postponed
1589                          * to right before bnx2x_ack_sb). In this case there
1590                          * will never be another interrupt until there is
1591                          * another update of the status block, while there
1592                          * is still unhandled work.
1593                          */
1594                         rmb();
1595
1596                         if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
1597                                 napi_complete(napi);
1598                                 /* Re-enable interrupts */
1599                                 DP(NETIF_MSG_HW,
1600                                    "Update index to %d\n", fp->fp_hc_idx);
1601                                 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
1602                                              le16_to_cpu(fp->fp_hc_idx),
1603                                              IGU_INT_ENABLE, 1);
1604                                 break;
1605                         }
1606                 }
1607         }
1608
1609         return work_done;
1610 }
1611
1612 /* We split the first BD into a headers BD and a data BD
1613  * to ease the pain of our fellow microcode engineers;
1614  * we use one DMA mapping for both BDs.
1615  * So far this has only been observed to happen
1616  * in Other Operating Systems(TM).
1617  */
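/* Example: if the linear data holds hlen bytes of headers plus payload, the
 * start BD is trimmed to hlen bytes and a new data BD takes the remaining
 * (old_len - hlen) bytes at mapping + hlen, so both BDs reference the single
 * DMA mapping of the linear data.
 */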
1618 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
1619                                    struct bnx2x_fastpath *fp,
1620                                    struct sw_tx_bd *tx_buf,
1621                                    struct eth_tx_start_bd **tx_bd, u16 hlen,
1622                                    u16 bd_prod, int nbd)
1623 {
1624         struct eth_tx_start_bd *h_tx_bd = *tx_bd;
1625         struct eth_tx_bd *d_tx_bd;
1626         dma_addr_t mapping;
1627         int old_len = le16_to_cpu(h_tx_bd->nbytes);
1628
1629         /* first fix first BD */
1630         h_tx_bd->nbd = cpu_to_le16(nbd);
1631         h_tx_bd->nbytes = cpu_to_le16(hlen);
1632
1633         DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
1634            "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
1635            h_tx_bd->addr_lo, h_tx_bd->nbd);
1636
1637         /* now get a new data BD
1638          * (after the pbd) and fill it */
1639         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
1640         d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
1641
1642         mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
1643                            le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
1644
1645         d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1646         d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1647         d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
1648
1649         /* this marks the BD as one that has no individual mapping */
1650         tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
1651
1652         DP(NETIF_MSG_TX_QUEUED,
1653            "TSO split data size is %d (%x:%x)\n",
1654            d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
1655
1656         /* update tx_bd */
1657         *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
1658
1659         return bd_prod;
1660 }
1661
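/* Workaround for a HW checksum bug: fold the partial checksum of the bytes
 * between the stack's csum start and the transport header into (or out of)
 * the checksum value, depending on the sign of the offset, and return the
 * result byte-swapped for the parsing BD.
 */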
1662 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
1663 {
1664         if (fix > 0)
1665                 csum = (u16) ~csum_fold(csum_sub(csum,
1666                                 csum_partial(t_header - fix, fix, 0)));
1667
1668         else if (fix < 0)
1669                 csum = (u16) ~csum_fold(csum_add(csum,
1670                                 csum_partial(t_header, -fix, 0)));
1671
1672         return swab16(csum);
1673 }
1674
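/* Translate the skb checksum and GSO state into the driver's XMIT_* flag
 * mask (plain, IPv4/IPv6 checksum, TCP checksum, GSO v4/v6) that is used to
 * program the BDs.
 */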
1675 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
1676 {
1677         u32 rc;
1678
1679         if (skb->ip_summed != CHECKSUM_PARTIAL)
1680                 rc = XMIT_PLAIN;
1681
1682         else {
1683                 if (skb->protocol == htons(ETH_P_IPV6)) {
1684                         rc = XMIT_CSUM_V6;
1685                         if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
1686                                 rc |= XMIT_CSUM_TCP;
1687
1688                 } else {
1689                         rc = XMIT_CSUM_V4;
1690                         if (ip_hdr(skb)->protocol == IPPROTO_TCP)
1691                                 rc |= XMIT_CSUM_TCP;
1692                 }
1693         }
1694
1695         if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
1696                 rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP);
1697
1698         else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
1699                 rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6);
1700
1701         return rc;
1702 }
1703
1704 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
1705 /* Check if the packet requires linearization (i.e. it is too fragmented).
1706    There is no need to check fragmentation if the page size > 8K (there
1707    will be no violation of the FW restrictions). */
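/* The check slides a window of (MAX_FETCH_BD - 3) fragments over the packet
 * and flags the skb for linearization if any such window carries less than
 * one MSS of data, so that the FW can always assemble an MSS-sized frame
 * from at most MAX_FETCH_BD BDs.
 */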
1708 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
1709                              u32 xmit_type)
1710 {
1711         int to_copy = 0;
1712         int hlen = 0;
1713         int first_bd_sz = 0;
1714
1715         /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
1716         if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
1717
1718                 if (xmit_type & XMIT_GSO) {
1719                         unsigned short lso_mss = skb_shinfo(skb)->gso_size;
1720                         /* Check if LSO packet needs to be copied:
1721                            3 = 1 (for headers BD) + 2 (for PBD and last BD) */
1722                         int wnd_size = MAX_FETCH_BD - 3;
1723                         /* Number of windows to check */
1724                         int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
1725                         int wnd_idx = 0;
1726                         int frag_idx = 0;
1727                         u32 wnd_sum = 0;
1728
1729                         /* Headers length */
1730                         hlen = (int)(skb_transport_header(skb) - skb->data) +
1731                                 tcp_hdrlen(skb);
1732
1733                         /* Amount of data (w/o headers) in the linear part of the SKB */
1734                         first_bd_sz = skb_headlen(skb) - hlen;
1735
1736                         wnd_sum  = first_bd_sz;
1737
1738                         /* Calculate the first sum - it's special */
1739                         for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
1740                                 wnd_sum +=
1741                                         skb_shinfo(skb)->frags[frag_idx].size;
1742
1743                         /* If there was data in the linear part of the skb - check it */
1744                         if (first_bd_sz > 0) {
1745                                 if (unlikely(wnd_sum < lso_mss)) {
1746                                         to_copy = 1;
1747                                         goto exit_lbl;
1748                                 }
1749
1750                                 wnd_sum -= first_bd_sz;
1751                         }
1752
1753                         /* Others are easier: run through the frag list and
1754                            check all windows */
1755                         for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
1756                                 wnd_sum +=
1757                           skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
1758
1759                                 if (unlikely(wnd_sum < lso_mss)) {
1760                                         to_copy = 1;
1761                                         break;
1762                                 }
1763                                 wnd_sum -=
1764                                         skb_shinfo(skb)->frags[wnd_idx].size;
1765                         }
1766                 } else {
1767                         /* a non-LSO packet that is too fragmented must
1768                            always be linearized */
1769                         to_copy = 1;
1770                 }
1771         }
1772
1773 exit_lbl:
1774         if (unlikely(to_copy))
1775                 DP(NETIF_MSG_TX_QUEUED,
1776                    "Linearization IS REQUIRED for %s packet. "
1777                    "num_frags %d  hlen %d  first_bd_sz %d\n",
1778                    (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
1779                    skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
1780
1781         return to_copy;
1782 }
1783 #endif
1784
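/* Fill the E2 parsing BD with the LSO MSS and mark IPv6 packets that carry
 * extension headers.
 */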
1785 static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb,
1786                                      struct eth_tx_parse_bd_e2 *pbd,
1787                                      u32 xmit_type)
1788 {
1789         pbd->parsing_data |= cpu_to_le16(skb_shinfo(skb)->gso_size) <<
1790                 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT;
1791         if ((xmit_type & XMIT_GSO_V6) &&
1792             (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
1793                 pbd->parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
1794 }
1795
1796 /**
1797  * Update the parsing BD (E1x) in the GSO case.
1798  *
1799  * @param skb        packet being transmitted
1800  * @param pbd        E1x parsing BD to fill
1801  * @param xmit_type  XMIT_* flags describing the packet
1802  *                   (checksum and GSO type)
1803  */
1804 static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
1805                                      struct eth_tx_parse_bd_e1x *pbd,
1806                                      u32 xmit_type)
1807 {
1808         pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
1809         pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
1810         pbd->tcp_flags = pbd_tcp_flags(skb);
1811
1812         if (xmit_type & XMIT_GSO_V4) {
1813                 pbd->ip_id = swab16(ip_hdr(skb)->id);
1814                 pbd->tcp_pseudo_csum =
1815                         swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
1816                                                   ip_hdr(skb)->daddr,
1817                                                   0, IPPROTO_TCP, 0));
1818
1819         } else
1820                 pbd->tcp_pseudo_csum =
1821                         swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
1822                                                 &ipv6_hdr(skb)->daddr,
1823                                                 0, IPPROTO_TCP, 0));
1824
1825         pbd->global_data |= ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN;
1826 }
1827
1828 /**
1829  * Set up the E2 parsing BD for checksum offload.
1830  *
1831  * @param bp         driver handle
1832  * @param skb        packet being transmitted
1833  * @param pbd        E2 parsing BD to fill
1834  * @param xmit_type  XMIT_* flags describing the packet
1835  * @return header length in bytes
1836  */
1837 static inline  u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
1838         struct eth_tx_parse_bd_e2 *pbd,
1839         u32 xmit_type)
1840 {
1841         pbd->parsing_data |= cpu_to_le16(tcp_hdrlen(skb)/4) <<
1842                 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT;
1843
1844         pbd->parsing_data |= cpu_to_le16(((unsigned char *)tcp_hdr(skb) -
1845                                           skb->data) / 2) <<
1846                 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT;
1847
1848         return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
1849 }
1850
1851 /**
1852  * Set up the E1x parsing BD for checksum offload.
1853  *
1854  * @param bp         driver handle
1855  * @param skb        packet being transmitted
1856  * @param pbd        E1x parsing BD to fill
1857  * @param xmit_type  XMIT_* flags describing the packet
1858  * @return header length in bytes
1859  */
1860 static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
1861         struct eth_tx_parse_bd_e1x *pbd,
1862         u32 xmit_type)
1863 {
1864         u8 hlen = (skb_network_header(skb) - skb->data) / 2;
1865
1866         /* for now NS flag is not used in Linux */
1867         pbd->global_data =
1868                 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
1869                          ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
1870
1871         pbd->ip_hlen_w = (skb_transport_header(skb) -
1872                         skb_network_header(skb)) / 2;
1873
1874         hlen += pbd->ip_hlen_w + tcp_hdrlen(skb) / 2;
1875
1876         pbd->total_hlen_w = cpu_to_le16(hlen);
1877         hlen = hlen*2;
1878
1879         if (xmit_type & XMIT_CSUM_TCP) {
1880                 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
1881
1882         } else {
1883                 s8 fix = SKB_CS_OFF(skb); /* signed! */
1884
1885                 DP(NETIF_MSG_TX_QUEUED,
1886                    "hlen %d  fix %d  csum before fix %x\n",
1887                    le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
1888
1889                 /* HW bug: fixup the CSUM */
1890                 pbd->tcp_pseudo_csum =
1891                         bnx2x_csum_fix(skb_transport_header(skb),
1892                                        SKB_CS(skb), fix);
1893
1894                 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
1895                    pbd->tcp_pseudo_csum);
1896         }
1897
1898         return hlen;
1899 }
1900
1901 /* called with netif_tx_lock
1902  * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
1903  * netif_wake_queue()
1904  */
1905 netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
1906 {
1907         struct bnx2x *bp = netdev_priv(dev);
1908         struct bnx2x_fastpath *fp;
1909         struct netdev_queue *txq;
1910         struct sw_tx_bd *tx_buf;
1911         struct eth_tx_start_bd *tx_start_bd;
1912         struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
1913         struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
1914         struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
1915         u16 pkt_prod, bd_prod;
1916         int nbd, fp_index;
1917         dma_addr_t mapping;
1918         u32 xmit_type = bnx2x_xmit_type(bp, skb);
1919         int i;
1920         u8 hlen = 0;
1921         __le16 pkt_size = 0;
1922         struct ethhdr *eth;
1923         u8 mac_type = UNICAST_ADDRESS;
1924
1925 #ifdef BNX2X_STOP_ON_ERROR
1926         if (unlikely(bp->panic))
1927                 return NETDEV_TX_BUSY;
1928 #endif
1929
1930         fp_index = skb_get_queue_mapping(skb);
1931         txq = netdev_get_tx_queue(dev, fp_index);
1932
1933         fp = &bp->fp[fp_index];
1934
1935         if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
1936                 fp->eth_q_stats.driver_xoff++;
1937                 netif_tx_stop_queue(txq);
1938                 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
1939                 return NETDEV_TX_BUSY;
1940         }
1941
1942         DP(NETIF_MSG_TX_QUEUED, "queue[%d]: SKB: summed %x  protocol %x  "
1943                                 "protocol(%x,%x) gso type %x  xmit_type %x\n",
1944            fp_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
1945            ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
1946
1947         eth = (struct ethhdr *)skb->data;
1948
1949         /* set flag according to packet type (UNICAST_ADDRESS is default)*/
1950         if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
1951                 if (is_broadcast_ether_addr(eth->h_dest))
1952                         mac_type = BROADCAST_ADDRESS;
1953                 else
1954                         mac_type = MULTICAST_ADDRESS;
1955         }
1956
1957 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
1958         /* First, check if we need to linearize the skb (due to FW
1959            restrictions). No need to check fragmentation if page size > 8K
1960            (there will be no violation of the FW restrictions) */
1961         if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
1962                 /* Statistics of linearization */
1963                 bp->lin_cnt++;
1964                 if (skb_linearize(skb) != 0) {
1965                         DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
1966                            "silently dropping this SKB\n");
1967                         dev_kfree_skb_any(skb);
1968                         return NETDEV_TX_OK;
1969                 }
1970         }
1971 #endif
1972
1973         /*
1974          * Please read carefully. First we use one BD which we mark as start,
1975          * then we have a parsing info BD (used for TSO or xsum),
1976          * and only then we have the rest of the TSO BDs.
1977          * (don't forget to mark the last one as last,
1978          * and to unmap only AFTER you write to the BD ...)
1979          * And above all, all PBD sizes are in words - NOT DWORDS!
1980          */
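        /*
         * The resulting chain for a TSO packet is therefore:
         *   start BD -> parsing BD -> (optional split data BD) -> frag BDs
         * with nbd counting all the BDs consumed by the packet.
         */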
1981
1982         pkt_prod = fp->tx_pkt_prod++;
1983         bd_prod = TX_BD(fp->tx_bd_prod);
1984
1985         /* get a tx_buf and first BD */
1986         tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
1987         tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
1988
1989         tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
1990         SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_ETH_ADDR_TYPE,
1991                  mac_type);
1992
1993         /* header nbd */
1994         SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1);
1995
1996         /* remember the first BD of the packet */
1997         tx_buf->first_bd = fp->tx_bd_prod;
1998         tx_buf->skb = skb;
1999         tx_buf->flags = 0;
2000
2001         DP(NETIF_MSG_TX_QUEUED,
2002            "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
2003            pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
2004
2005         if (vlan_tx_tag_present(skb)) {
2006                 tx_start_bd->vlan_or_ethertype =
2007                     cpu_to_le16(vlan_tx_tag_get(skb));
2008                 tx_start_bd->bd_flags.as_bitfield |=
2009                     (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
2010         } else
2011                 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
2012
2013         /* turn on parsing and get a BD */
2014         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2015
2016         if (xmit_type & XMIT_CSUM) {
2017                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
2018
2019                 if (xmit_type & XMIT_CSUM_V4)
2020                         tx_start_bd->bd_flags.as_bitfield |=
2021                                                 ETH_TX_BD_FLAGS_IP_CSUM;
2022                 else
2023                         tx_start_bd->bd_flags.as_bitfield |=
2024                                                 ETH_TX_BD_FLAGS_IPV6;
2025
2026                 if (!(xmit_type & XMIT_CSUM_TCP))
2027                         tx_start_bd->bd_flags.as_bitfield |=
2028                                                 ETH_TX_BD_FLAGS_IS_UDP;
2029         }
2030
2031         if (CHIP_IS_E2(bp)) {
2032                 pbd_e2 = &fp->tx_desc_ring[bd_prod].parse_bd_e2;
2033                 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
2034                 /* Set PBD in checksum offload case */
2035                 if (xmit_type & XMIT_CSUM)
2036                         hlen = bnx2x_set_pbd_csum_e2(bp,
2037                                                      skb, pbd_e2, xmit_type);
2038         } else {
2039                 pbd_e1x = &fp->tx_desc_ring[bd_prod].parse_bd_e1x;
2040                 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
2041                 /* Set PBD in checksum offload case */
2042                 if (xmit_type & XMIT_CSUM)
2043                         hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
2044
2045         }
2046
2047         /* Map skb linear data for DMA */
2048         mapping = dma_map_single(&bp->pdev->dev, skb->data,
2049                                  skb_headlen(skb), DMA_TO_DEVICE);
2050
2051         /* Setup the data pointer of the first BD of the packet */
2052         tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2053         tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
2054         nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
2055         tx_start_bd->nbd = cpu_to_le16(nbd);
2056         tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
2057         pkt_size = tx_start_bd->nbytes;
2058
2059         DP(NETIF_MSG_TX_QUEUED, "first bd @%p  addr (%x:%x)  nbd %d"
2060            "  nbytes %d  flags %x  vlan %x\n",
2061            tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
2062            le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
2063            tx_start_bd->bd_flags.as_bitfield,
2064            le16_to_cpu(tx_start_bd->vlan_or_ethertype));
2065
2066         if (xmit_type & XMIT_GSO) {
2067
2068                 DP(NETIF_MSG_TX_QUEUED,
2069                    "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
2070                    skb->len, hlen, skb_headlen(skb),
2071                    skb_shinfo(skb)->gso_size);
2072
2073                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
2074
2075                 if (unlikely(skb_headlen(skb) > hlen))
2076                         bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
2077                                                  hlen, bd_prod, ++nbd);
2078                 if (CHIP_IS_E2(bp))
2079                         bnx2x_set_pbd_gso_e2(skb, pbd_e2, xmit_type);
2080                 else
2081                         bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
2082         }
2083         tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
2084
2085         /* Handle fragmented skb */
2086         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2087                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2088
2089                 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2090                 tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
2091                 if (total_pkt_bd == NULL)
2092                         total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
2093
2094                 mapping = dma_map_page(&bp->pdev->dev, frag->page,
2095                                        frag->page_offset,
2096                                        frag->size, DMA_TO_DEVICE);
2097
2098                 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2099                 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
2100                 tx_data_bd->nbytes = cpu_to_le16(frag->size);
2101                 le16_add_cpu(&pkt_size, frag->size);
2102
2103                 DP(NETIF_MSG_TX_QUEUED,
2104                    "frag %d  bd @%p  addr (%x:%x)  nbytes %d\n",
2105                    i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
2106                    le16_to_cpu(tx_data_bd->nbytes));
2107         }
2108
2109         DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
2110
2111         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2112
2113         /* now send a tx doorbell, counting the next BD
2114          * if the packet contains or ends with it
2115          */
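        /* If the producer's offset within the current BD page (TX_BD_POFF) is
         * smaller than nbd, the packet wrapped into the next page and the
         * next-page pointer BD must be counted as well.
         */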
2116         if (TX_BD_POFF(bd_prod) < nbd)
2117                 nbd++;
2118
2119         if (total_pkt_bd != NULL)
2120                 total_pkt_bd->total_pkt_bytes = pkt_size;
2121
2122         if (pbd_e1x)
2123                 DP(NETIF_MSG_TX_QUEUED,
2124                    "PBD (E1X) @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u"
2125                    "  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
2126                    pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
2127                    pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
2128                    pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
2129                     le16_to_cpu(pbd_e1x->total_hlen_w));
2130         if (pbd_e2)
2131                 DP(NETIF_MSG_TX_QUEUED,
2132                    "PBD (E2) @%p  dst %x %x %x src %x %x %x parsing_data %x\n",
2133                    pbd_e2, pbd_e2->dst_mac_addr_hi, pbd_e2->dst_mac_addr_mid,
2134                    pbd_e2->dst_mac_addr_lo, pbd_e2->src_mac_addr_hi,
2135                    pbd_e2->src_mac_addr_mid, pbd_e2->src_mac_addr_lo,
2136                    pbd_e2->parsing_data);
2137         DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);
2138
2139         /*
2140          * Make sure that the BD data is updated before updating the producer
2141          * since FW might read the BD right after the producer is updated.
2142          * This is only applicable for weak-ordered memory model archs such
2143          * as IA-64. The following barrier is also mandatory since the FW
2144          * assumes packets always have BDs.
2145          */
2146         wmb();
2147
2148         fp->tx_db.data.prod += nbd;
2149         barrier();
2150
2151         DOORBELL(bp, fp->cid, fp->tx_db.raw);
2152
2153         mmiowb();
2154
2155         fp->tx_bd_prod += nbd;
2156
2157         if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
2158                 netif_tx_stop_queue(txq);
2159
2160                 /* paired memory barrier is in bnx2x_tx_int(); we have to keep
2161                  * ordering of set_bit() in netif_tx_stop_queue() and the read
2162                  * of fp->tx_bd_cons */
2163                 smp_mb();
2164
2165                 fp->eth_q_stats.driver_xoff++;
2166                 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
2167                         netif_tx_wake_queue(txq);
2168         }
2169         fp->tx_pkt++;
2170
2171         return NETDEV_TX_OK;
2172 }
2173
2174 /* called with rtnl_lock */
2175 int bnx2x_change_mac_addr(struct net_device *dev, void *p)
2176 {
2177         struct sockaddr *addr = p;
2178         struct bnx2x *bp = netdev_priv(dev);
2179
2180         if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
2181                 return -EINVAL;
2182
2183         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2184         if (netif_running(dev))
2185                 bnx2x_set_eth_mac(bp, 1);
2186
2187         return 0;
2188 }
2189
2190
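/* Request the interrupts for the current mode: MSI-X vectors when they were
 * successfully enabled, otherwise a single MSI or legacy INTx line.
 */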
2191 static int bnx2x_setup_irqs(struct bnx2x *bp)
2192 {
2193         int rc = 0;
2194         if (bp->flags & USING_MSIX_FLAG) {
2195                 rc = bnx2x_req_msix_irqs(bp);
2196                 if (rc)
2197                         return rc;
2198         } else {
2199                 bnx2x_ack_int(bp);
2200                 rc = bnx2x_req_irq(bp);
2201                 if (rc) {
2202                         BNX2X_ERR("IRQ request failed  rc %d, aborting\n", rc);
2203                         return rc;
2204                 }
2205                 if (bp->flags & USING_MSI_FLAG) {
2206                         bp->dev->irq = bp->pdev->irq;
2207                         netdev_info(bp->dev, "using MSI  IRQ %d\n",
2208                                bp->pdev->irq);
2209                 }
2210         }
2211
2212         return 0;
2213 }
2214
2215 void bnx2x_free_mem_bp(struct bnx2x *bp)
2216 {
2217         kfree(bp->fp);
2218         kfree(bp->msix_table);
2219         kfree(bp->ilt);
2220 }
2221
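/* Allocate the driver bookkeeping set up at probe time: the fastpath array,
 * the MSI-X entry table and the ILT descriptor.
 */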
2222 int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
2223 {
2224         struct bnx2x_fastpath *fp;
2225         struct msix_entry *tbl;
2226         struct bnx2x_ilt *ilt;
2227
2228         /* fp array */
2229         fp = kzalloc(L2_FP_COUNT(bp->l2_cid_count)*sizeof(*fp), GFP_KERNEL);
2230         if (!fp)
2231                 goto alloc_err;
2232         bp->fp = fp;
2233
2234         /* msix table */
2235         tbl = kzalloc((bp->l2_cid_count + 1) * sizeof(*tbl),
2236                                   GFP_KERNEL);
2237         if (!tbl)
2238                 goto alloc_err;
2239         bp->msix_table = tbl;
2240
2241         /* ilt */
2242         ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
2243         if (!ilt)
2244                 goto alloc_err;
2245         bp->ilt = ilt;
2246
2247         return 0;
2248 alloc_err:
2249         bnx2x_free_mem_bp(bp);
2250         return -ENOMEM;
2251
2252 }
2253
2254 /* called with rtnl_lock */
2255 int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
2256 {
2257         struct bnx2x *bp = netdev_priv(dev);
2258         int rc = 0;
2259
2260         if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
2261                 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
2262                 return -EAGAIN;
2263         }
2264
2265         if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
2266             ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
2267                 return -EINVAL;
2268
2269         /* This does not race with packet allocation
2270          * because the actual alloc size is
2271          * only updated as part of load
2272          */
2273         dev->mtu = new_mtu;
2274
2275         if (netif_running(dev)) {
2276                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
2277                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
2278         }
2279
2280         return rc;
2281 }
2282
2283 void bnx2x_tx_timeout(struct net_device *dev)
2284 {
2285         struct bnx2x *bp = netdev_priv(dev);
2286
2287 #ifdef BNX2X_STOP_ON_ERROR
2288         if (!bp->panic)
2289                 bnx2x_panic();
2290 #endif
2291         /* This allows the netif to be shut down gracefully before resetting */
2292         schedule_delayed_work(&bp->reset_task, 0);
2293 }
2294
2295 int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
2296 {
2297         struct net_device *dev = pci_get_drvdata(pdev);
2298         struct bnx2x *bp;
2299
2300         if (!dev) {
2301                 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
2302                 return -ENODEV;
2303         }
2304         bp = netdev_priv(dev);
2305
2306         rtnl_lock();
2307
2308         pci_save_state(pdev);
2309
2310         if (!netif_running(dev)) {
2311                 rtnl_unlock();
2312                 return 0;
2313         }
2314
2315         netif_device_detach(dev);
2316
2317         bnx2x_nic_unload(bp, UNLOAD_CLOSE);
2318
2319         bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
2320
2321         rtnl_unlock();
2322
2323         return 0;
2324 }
2325
2326 int bnx2x_resume(struct pci_dev *pdev)
2327 {
2328         struct net_device *dev = pci_get_drvdata(pdev);
2329         struct bnx2x *bp;
2330         int rc;
2331
2332         if (!dev) {
2333                 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
2334                 return -ENODEV;
2335         }
2336         bp = netdev_priv(dev);
2337
2338         if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
2339                 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
2340                 return -EAGAIN;
2341         }
2342
2343         rtnl_lock();
2344
2345         pci_restore_state(pdev);
2346
2347         if (!netif_running(dev)) {
2348                 rtnl_unlock();
2349                 return 0;
2350         }
2351
2352         bnx2x_set_power_state(bp, PCI_D0);
2353         netif_device_attach(dev);
2354
2355         /* Since the chip was reset, clear the FW sequence number */
2356         bp->fw_seq = 0;
2357         rc = bnx2x_nic_load(bp, LOAD_OPEN);
2358
2359         rtnl_unlock();
2360
2361         return rc;
2362 }