bnx2x: put start bd csum in separate function
[pandora-kernel.git] drivers/net/bnx2x/bnx2x_cmn.c
1 /* bnx2x_cmn.c: Broadcom Everest network driver.
2  *
3  * Copyright (c) 2007-2011 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10  * Written by: Eliezer Tamir
11  * Based on code from Michael Chan's bnx2 driver
12  * UDP CSUM errata workaround by Arik Gendelman
13  * Slowpath and fastpath rework by Vladislav Zolotarov
14  * Statistics and Link management by Yitchak Gertner
15  *
16  */
17
18 #include <linux/etherdevice.h>
19 #include <linux/if_vlan.h>
20 #include <linux/interrupt.h>
21 #include <linux/ip.h>
22 #include <net/ipv6.h>
23 #include <net/ip6_checksum.h>
24 #include <linux/firmware.h>
25 #include <linux/prefetch.h>
26 #include "bnx2x_cmn.h"
27
28 #include "bnx2x_init.h"
29
30 static int bnx2x_setup_irqs(struct bnx2x *bp);
31
32 /**
33  * bnx2x_bz_fp - zero content of the fastpath structure.
34  *
35  * @bp:         driver handle
36  * @index:      fastpath index to be zeroed
37  *
38  * Makes sure the contents of bp->fp[index].napi are kept
39  * intact.
40  */
41 static inline void bnx2x_bz_fp(struct bnx2x *bp, int index)
42 {
43         struct bnx2x_fastpath *fp = &bp->fp[index];
44         struct napi_struct orig_napi = fp->napi;
45         /* bzero bnx2x_fastpath contents */
46         memset(fp, 0, sizeof(*fp));
47
48         /* Restore the NAPI object as it has been already initialized */
49         fp->napi = orig_napi;
50 }
51
52 /**
53  * bnx2x_move_fp - move content of the fastpath structure.
54  *
55  * @bp:         driver handle
56  * @from:       source FP index
57  * @to:         destination FP index
58  *
59  * Makes sure the contents of bp->fp[to].napi are kept
60  * intact.
61  */
62 static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
63 {
64         struct bnx2x_fastpath *from_fp = &bp->fp[from];
65         struct bnx2x_fastpath *to_fp = &bp->fp[to];
66         struct napi_struct orig_napi = to_fp->napi;
67         /* Move bnx2x_fastpath contents */
68         memcpy(to_fp, from_fp, sizeof(*to_fp));
69         to_fp->index = to;
70
71         /* Restore the NAPI object as it has been already initialized */
72         to_fp->napi = orig_napi;
73 }
74
75 /* free skb in the packet ring at pos idx
76  * return idx of last bd freed
77  */
78 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
79                              u16 idx)
80 {
81         struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
82         struct eth_tx_start_bd *tx_start_bd;
83         struct eth_tx_bd *tx_data_bd;
84         struct sk_buff *skb = tx_buf->skb;
85         u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
86         int nbd;
87
88         /* prefetch skb end pointer to speedup dev_kfree_skb() */
89         prefetch(&skb->end);
90
91         DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
92            idx, tx_buf, skb);
93
94         /* unmap first bd */
95         DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
96         tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
97         dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
98                          BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);
99
100         nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
101 #ifdef BNX2X_STOP_ON_ERROR
102         if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
103                 BNX2X_ERR("BAD nbd!\n");
104                 bnx2x_panic();
105         }
106 #endif
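        /* bd consumer index once every BD of this packet has been released */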
107         new_cons = nbd + tx_buf->first_bd;
108
109         /* Get the next bd */
110         bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
111
112         /* Skip a parse bd... */
113         --nbd;
114         bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
115
116         /* ...and the TSO split header bd since they have no mapping */
117         if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
118                 --nbd;
119                 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
120         }
121
122         /* now free frags */
123         while (nbd > 0) {
124
125                 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
126                 tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
127                 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
128                                BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
129                 if (--nbd)
130                         bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
131         }
132
133         /* release skb */
134         WARN_ON(!skb);
135         dev_kfree_skb_any(skb);
136         tx_buf->first_bd = 0;
137         tx_buf->skb = NULL;
138
139         return new_cons;
140 }
141
142 int bnx2x_tx_int(struct bnx2x_fastpath *fp)
143 {
144         struct bnx2x *bp = fp->bp;
145         struct netdev_queue *txq;
146         u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
147
148 #ifdef BNX2X_STOP_ON_ERROR
149         if (unlikely(bp->panic))
150                 return -1;
151 #endif
152
153         txq = netdev_get_tx_queue(bp->dev, fp->index);
154         hw_cons = le16_to_cpu(*fp->tx_cons_sb);
155         sw_cons = fp->tx_pkt_cons;
156
157         while (sw_cons != hw_cons) {
158                 u16 pkt_cons;
159
160                 pkt_cons = TX_BD(sw_cons);
161
162                 DP(NETIF_MSG_TX_DONE, "queue[%d]: hw_cons %u  sw_cons %u "
163                                       " pkt_cons %u\n",
164                    fp->index, hw_cons, sw_cons, pkt_cons);
165
166                 bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
167                 sw_cons++;
168         }
169
170         fp->tx_pkt_cons = sw_cons;
171         fp->tx_bd_cons = bd_cons;
172
173         /* Need to make the tx_bd_cons update visible to start_xmit()
174          * before checking for netif_tx_queue_stopped().  Without the
175          * memory barrier, there is a small possibility that
176          * start_xmit() will miss it and cause the queue to be stopped
177          * forever.
178          */
179         smp_mb();
180
181         if (unlikely(netif_tx_queue_stopped(txq))) {
182                 /* Taking tx_lock() is needed to prevent re-enabling the queue
183                  * while it's empty. This could have happened if rx_action() gets
184                  * suspended in bnx2x_tx_int() after the condition before
185                  * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
186                  *
187                  * stops the queue->sees fresh tx_bd_cons->releases the queue->
188                  * sends some packets consuming the whole queue again->
189                  * stops the queue
190                  */
191
192                 __netif_tx_lock(txq, smp_processor_id());
193
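                /* Only wake the queue if there is room for a worst-case
                 * (maximally fragmented) packet.
                 */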
194                 if ((netif_tx_queue_stopped(txq)) &&
195                     (bp->state == BNX2X_STATE_OPEN) &&
196                     (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
197                         netif_tx_wake_queue(txq);
198
199                 __netif_tx_unlock(txq);
200         }
201         return 0;
202 }
203
204 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
205                                              u16 idx)
206 {
207         u16 last_max = fp->last_max_sge;
208
209         if (SUB_S16(idx, last_max) > 0)
210                 fp->last_max_sge = idx;
211 }
212
213 static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
214                                   struct eth_fast_path_rx_cqe *fp_cqe)
215 {
216         struct bnx2x *bp = fp->bp;
217         u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
218                                      le16_to_cpu(fp_cqe->len_on_bd)) >>
219                       SGE_PAGE_SHIFT;
220         u16 last_max, last_elem, first_elem;
221         u16 delta = 0;
222         u16 i;
223
224         if (!sge_len)
225                 return;
226
227         /* First mark all used pages */
228         for (i = 0; i < sge_len; i++)
229                 SGE_MASK_CLEAR_BIT(fp,
230                         RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[i])));
231
232         DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
233            sge_len - 1, le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));
234
235         /* Here we assume that the last SGE index is the biggest */
236         prefetch((void *)(fp->sge_mask));
237         bnx2x_update_last_max_sge(fp,
238                 le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));
239
240         last_max = RX_SGE(fp->last_max_sge);
241         last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
242         first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;
243
244         /* If ring is not full */
245         if (last_elem + 1 != first_elem)
246                 last_elem++;
247
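        /* Each sge_mask element tracks RX_SGE_MASK_ELEM_SZ SGE entries, one
         * bit per entry; a cleared bit means the FW has consumed that page.
         * The producer may only advance over fully consumed elements, which
         * are re-armed to all ones as they are passed.
         */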
248         /* Now update the prod */
249         for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
250                 if (likely(fp->sge_mask[i]))
251                         break;
252
253                 fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
254                 delta += RX_SGE_MASK_ELEM_SZ;
255         }
256
257         if (delta > 0) {
258                 fp->rx_sge_prod += delta;
259                 /* clear page-end entries */
260                 bnx2x_clear_sge_mask_next_elems(fp);
261         }
262
263         DP(NETIF_MSG_RX_STATUS,
264            "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
265            fp->last_max_sge, fp->rx_sge_prod);
266 }
267
268 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
269                             struct sk_buff *skb, u16 cons, u16 prod)
270 {
271         struct bnx2x *bp = fp->bp;
272         struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
273         struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
274         struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
275         dma_addr_t mapping;
276
277         /* move empty skb from pool to prod and map it */
278         prod_rx_buf->skb = fp->tpa_pool[queue].skb;
279         mapping = dma_map_single(&bp->pdev->dev, fp->tpa_pool[queue].skb->data,
280                                  fp->rx_buf_size, DMA_FROM_DEVICE);
281         dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
282
283         /* move partial skb from cons to pool (don't unmap yet) */
284         fp->tpa_pool[queue] = *cons_rx_buf;
285
286         /* mark bin state as start - print error if current state != stop */
287         if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
288                 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
289
290         fp->tpa_state[queue] = BNX2X_TPA_START;
291
292         /* point prod_bd to new skb */
293         prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
294         prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
295
296 #ifdef BNX2X_STOP_ON_ERROR
297         fp->tpa_queue_used |= (1 << queue);
298 #ifdef _ASM_GENERIC_INT_L64_H
299         DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
300 #else
301         DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
302 #endif
303            fp->tpa_queue_used);
304 #endif
305 }
306
307 /* Timestamp option length allowed for TPA aggregation:
308  *
309  *              nop nop kind length echo val
310  */
311 #define TPA_TSTAMP_OPT_LEN      12
312 /**
313  * bnx2x_set_lro_mss - calculate the approximate value of the MSS
314  *
315  * @bp:                 driver handle
316  * @parsing_flags:      parsing flags from the START CQE
317  * @len_on_bd:          total length of the first packet for the
318  *                      aggregation.
319  *
320  * Approximate value of the MSS for this aggregation, calculated from
321  * its first packet.
322  */
323 static inline u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags,
324                                     u16 len_on_bd)
325 {
326         /* TPA aggregation won't have IP options or TCP options
327          * other than timestamp.
328          */
329         u16 hdrs_len = ETH_HLEN + sizeof(struct iphdr) + sizeof(struct tcphdr);
330
331
332         /* Check if there was a TCP timestamp; if there is, it will
333          * always be 12 bytes long: nop nop kind length echo val.
334          *
335          * Otherwise FW would close the aggregation.
336          */
337         if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
338                 hdrs_len += TPA_TSTAMP_OPT_LEN;
339
340         return len_on_bd - hdrs_len;
341 }
342
343 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
344                                struct sk_buff *skb,
345                                struct eth_fast_path_rx_cqe *fp_cqe,
346                                u16 cqe_idx, u16 parsing_flags)
347 {
348         struct sw_rx_page *rx_pg, old_rx_pg;
349         u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
350         u32 i, frag_len, frag_size, pages;
351         int err;
352         int j;
353
354         frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
355         pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
356
357         /* This is needed in order to enable forwarding support */
358         if (frag_size)
359                 skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp, parsing_flags,
360                                                               len_on_bd);
361
362 #ifdef BNX2X_STOP_ON_ERROR
363         if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
364                 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
365                           pages, cqe_idx);
366                 BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
367                           fp_cqe->pkt_len, len_on_bd);
368                 bnx2x_panic();
369                 return -EINVAL;
370         }
371 #endif
372
373         /* Run through the SGL and compose the fragmented skb */
374         for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
375                 u16 sge_idx =
376                         RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[j]));
377
378                 /* FW gives the indices of the SGE as if the ring is an array
379                    (meaning that "next" element will consume 2 indices) */
380                 frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
381                 rx_pg = &fp->rx_page_ring[sge_idx];
382                 old_rx_pg = *rx_pg;
383
384                 /* If we fail to allocate a substitute page, we simply stop
385                    where we are and drop the whole packet */
386                 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
387                 if (unlikely(err)) {
388                         fp->eth_q_stats.rx_skb_alloc_failed++;
389                         return err;
390                 }
391
392                 /* Unmap the page as we are going to pass it to the stack */
393                 dma_unmap_page(&bp->pdev->dev,
394                                dma_unmap_addr(&old_rx_pg, mapping),
395                                SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
396
397                 /* Add one frag and update the appropriate fields in the skb */
398                 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
399
400                 skb->data_len += frag_len;
401                 skb->truesize += frag_len;
402                 skb->len += frag_len;
403
404                 frag_size -= frag_len;
405         }
406
407         return 0;
408 }
409
410 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
411                            u16 queue, int pad, int len, union eth_rx_cqe *cqe,
412                            u16 cqe_idx)
413 {
414         struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
415         struct sk_buff *skb = rx_buf->skb;
416         /* alloc new skb */
417         struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, fp->rx_buf_size);
418
419         /* Unmap skb in the pool anyway, as we are going to change
420            pool entry status to BNX2X_TPA_STOP even if new skb allocation
421            fails. */
422         dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
423                          fp->rx_buf_size, DMA_FROM_DEVICE);
424
425         if (likely(new_skb)) {
426                 /* fix ip xsum and give it to the stack */
427                 /* (no need to map the new skb) */
428                 u16 parsing_flags =
429                         le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags);
430
431                 prefetch(skb);
432                 prefetch(((char *)(skb)) + L1_CACHE_BYTES);
433
434 #ifdef BNX2X_STOP_ON_ERROR
435                 if (pad + len > fp->rx_buf_size) {
436                         BNX2X_ERR("skb_put is about to fail...  "
437                                   "pad %d  len %d  rx_buf_size %d\n",
438                                   pad, len, fp->rx_buf_size);
439                         bnx2x_panic();
440                         return;
441                 }
442 #endif
443
444                 skb_reserve(skb, pad);
445                 skb_put(skb, len);
446
447                 skb->protocol = eth_type_trans(skb, bp->dev);
448                 skb->ip_summed = CHECKSUM_UNNECESSARY;
449
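                /* The IP header of the aggregated frame carries a stale
                 * checksum, so recompute it before handing the skb to the
                 * stack.
                 */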
450                 {
451                         struct iphdr *iph;
452
453                         iph = (struct iphdr *)skb->data;
454                         iph->check = 0;
455                         iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
456                 }
457
458                 if (!bnx2x_fill_frag_skb(bp, fp, skb,
459                                          &cqe->fast_path_cqe, cqe_idx,
460                                          parsing_flags)) {
461                         if (parsing_flags & PARSING_FLAGS_VLAN)
462                                 __vlan_hwaccel_put_tag(skb,
463                                                  le16_to_cpu(cqe->fast_path_cqe.
464                                                              vlan_tag));
465                         napi_gro_receive(&fp->napi, skb);
466                 } else {
467                         DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
468                            " - dropping packet!\n");
469                         dev_kfree_skb_any(skb);
470                 }
471
472
473                 /* put new skb in bin */
474                 fp->tpa_pool[queue].skb = new_skb;
475
476         } else {
477                 /* else drop the packet and keep the buffer in the bin */
478                 DP(NETIF_MSG_RX_STATUS,
479                    "Failed to allocate new skb - dropping packet!\n");
480                 fp->eth_q_stats.rx_skb_alloc_failed++;
481         }
482
483         fp->tpa_state[queue] = BNX2X_TPA_STOP;
484 }
485
486 /* Set Toeplitz hash value in the skb using the value from the
487  * CQE (calculated by HW).
488  */
489 static inline void bnx2x_set_skb_rxhash(struct bnx2x *bp, union eth_rx_cqe *cqe,
490                                         struct sk_buff *skb)
491 {
492         /* Set Toeplitz hash from CQE */
493         if ((bp->dev->features & NETIF_F_RXHASH) &&
494             (cqe->fast_path_cqe.status_flags &
495              ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
496                 skb->rxhash =
497                 le32_to_cpu(cqe->fast_path_cqe.rss_hash_result);
498 }
499
500 int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
501 {
502         struct bnx2x *bp = fp->bp;
503         u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
504         u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
505         int rx_pkt = 0;
506
507 #ifdef BNX2X_STOP_ON_ERROR
508         if (unlikely(bp->panic))
509                 return 0;
510 #endif
511
512         /* CQ "next element" is of the same size as a regular element,
513            that's why it's OK here */
514         hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
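        /* When the index lands on the last descriptor of a CQ page (the
         * "next page" element), step over it.
         */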
515         if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
516                 hw_comp_cons++;
517
518         bd_cons = fp->rx_bd_cons;
519         bd_prod = fp->rx_bd_prod;
520         bd_prod_fw = bd_prod;
521         sw_comp_cons = fp->rx_comp_cons;
522         sw_comp_prod = fp->rx_comp_prod;
523
524         /* Memory barrier necessary as speculative reads of the rx
525          * buffer can be ahead of the index in the status block
526          */
527         rmb();
528
529         DP(NETIF_MSG_RX_STATUS,
530            "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
531            fp->index, hw_comp_cons, sw_comp_cons);
532
533         while (sw_comp_cons != hw_comp_cons) {
534                 struct sw_rx_bd *rx_buf = NULL;
535                 struct sk_buff *skb;
536                 union eth_rx_cqe *cqe;
537                 u8 cqe_fp_flags;
538                 u16 len, pad;
539
540                 comp_ring_cons = RCQ_BD(sw_comp_cons);
541                 bd_prod = RX_BD(bd_prod);
542                 bd_cons = RX_BD(bd_cons);
543
544                 /* Prefetch the page containing the BD descriptor
545                    at producer's index. It will be needed when new skb is
546                    allocated */
547                 prefetch((void *)(PAGE_ALIGN((unsigned long)
548                                              (&fp->rx_desc_ring[bd_prod])) -
549                                   PAGE_SIZE + 1));
550
551                 cqe = &fp->rx_comp_ring[comp_ring_cons];
552                 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
553
554                 DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
555                    "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
556                    cqe_fp_flags, cqe->fast_path_cqe.status_flags,
557                    le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
558                    le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
559                    le16_to_cpu(cqe->fast_path_cqe.pkt_len));
560
561                 /* is this a slowpath msg? */
562                 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
563                         bnx2x_sp_event(fp, cqe);
564                         goto next_cqe;
565
566                 /* this is an rx packet */
567                 } else {
568                         rx_buf = &fp->rx_buf_ring[bd_cons];
569                         skb = rx_buf->skb;
570                         prefetch(skb);
571                         len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
572                         pad = cqe->fast_path_cqe.placement_offset;
573
574                         /* - If CQE is marked both TPA_START and TPA_END it is
575                          *   a non-TPA CQE.
576                          * - FP CQE will always have the TPA_START and/or
577                          *   TPA_STOP flags set.
578                          */
579                         if ((!fp->disable_tpa) &&
580                             (TPA_TYPE(cqe_fp_flags) !=
581                                         (TPA_TYPE_START | TPA_TYPE_END))) {
582                                 u16 queue = cqe->fast_path_cqe.queue_index;
583
584                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
585                                         DP(NETIF_MSG_RX_STATUS,
586                                            "calling tpa_start on queue %d\n",
587                                            queue);
588
589                                         bnx2x_tpa_start(fp, queue, skb,
590                                                         bd_cons, bd_prod);
591
592                                         /* Set Toeplitz hash for an LRO skb */
593                                         bnx2x_set_skb_rxhash(bp, cqe, skb);
594
595                                         goto next_rx;
596                                 } else { /* TPA_STOP */
597                                         DP(NETIF_MSG_RX_STATUS,
598                                            "calling tpa_stop on queue %d\n",
599                                            queue);
600
601                                         if (!BNX2X_RX_SUM_FIX(cqe))
602                                                 BNX2X_ERR("STOP on non-TCP "
603                                                           "data\n");
604
605                                         /* This is the size of the linear data
606                                            on this skb */
607                                         len = le16_to_cpu(cqe->fast_path_cqe.
608                                                                 len_on_bd);
609                                         bnx2x_tpa_stop(bp, fp, queue, pad,
610                                                     len, cqe, comp_ring_cons);
611 #ifdef BNX2X_STOP_ON_ERROR
612                                         if (bp->panic)
613                                                 return 0;
614 #endif
615
616                                         bnx2x_update_sge_prod(fp,
617                                                         &cqe->fast_path_cqe);
618                                         goto next_cqe;
619                                 }
620                         }
621
622                         dma_sync_single_for_device(&bp->pdev->dev,
623                                         dma_unmap_addr(rx_buf, mapping),
624                                                    pad + RX_COPY_THRESH,
625                                                    DMA_FROM_DEVICE);
626                         prefetch(((char *)(skb)) + L1_CACHE_BYTES);
627
628                         /* is this an error packet? */
629                         if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
630                                 DP(NETIF_MSG_RX_ERR,
631                                    "ERROR  flags %x  rx packet %u\n",
632                                    cqe_fp_flags, sw_comp_cons);
633                                 fp->eth_q_stats.rx_err_discard_pkt++;
634                                 goto reuse_rx;
635                         }
636
637                         /* Since we don't have a jumbo ring,
638                          * copy small packets if mtu > 1500
639                          */
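                        /* Copying lets the large rx buffer be reused via
                         * bnx2x_reuse_rx_skb() instead of allocating a new
                         * full-size skb for a small frame.
                         */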
640                         if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
641                             (len <= RX_COPY_THRESH)) {
642                                 struct sk_buff *new_skb;
643
644                                 new_skb = netdev_alloc_skb(bp->dev,
645                                                            len + pad);
646                                 if (new_skb == NULL) {
647                                         DP(NETIF_MSG_RX_ERR,
648                                            "ERROR  packet dropped "
649                                            "because of alloc failure\n");
650                                         fp->eth_q_stats.rx_skb_alloc_failed++;
651                                         goto reuse_rx;
652                                 }
653
654                                 /* aligned copy */
655                                 skb_copy_from_linear_data_offset(skb, pad,
656                                                     new_skb->data + pad, len);
657                                 skb_reserve(new_skb, pad);
658                                 skb_put(new_skb, len);
659
660                                 bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod);
661
662                                 skb = new_skb;
663
664                         } else
665                         if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
666                                 dma_unmap_single(&bp->pdev->dev,
667                                         dma_unmap_addr(rx_buf, mapping),
668                                                  fp->rx_buf_size,
669                                                  DMA_FROM_DEVICE);
670                                 skb_reserve(skb, pad);
671                                 skb_put(skb, len);
672
673                         } else {
674                                 DP(NETIF_MSG_RX_ERR,
675                                    "ERROR  packet dropped because "
676                                    "of alloc failure\n");
677                                 fp->eth_q_stats.rx_skb_alloc_failed++;
678 reuse_rx:
679                                 bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod);
680                                 goto next_rx;
681                         }
682
683                         skb->protocol = eth_type_trans(skb, bp->dev);
684
685                         /* Set Toeplitz hash for a non-LRO skb */
686                         bnx2x_set_skb_rxhash(bp, cqe, skb);
687
688                         skb_checksum_none_assert(skb);
689
690                         if (bp->dev->features & NETIF_F_RXCSUM) {
691                                 if (likely(BNX2X_RX_CSUM_OK(cqe)))
692                                         skb->ip_summed = CHECKSUM_UNNECESSARY;
693                                 else
694                                         fp->eth_q_stats.hw_csum_err++;
695                         }
696                 }
697
698                 skb_record_rx_queue(skb, fp->index);
699
700                 if (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
701                      PARSING_FLAGS_VLAN)
702                         __vlan_hwaccel_put_tag(skb,
703                                 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
704                 napi_gro_receive(&fp->napi, skb);
705
706
707 next_rx:
708                 rx_buf->skb = NULL;
709
710                 bd_cons = NEXT_RX_IDX(bd_cons);
711                 bd_prod = NEXT_RX_IDX(bd_prod);
712                 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
713                 rx_pkt++;
714 next_cqe:
715                 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
716                 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
717
718                 if (rx_pkt == budget)
719                         break;
720         } /* while */
721
722         fp->rx_bd_cons = bd_cons;
723         fp->rx_bd_prod = bd_prod_fw;
724         fp->rx_comp_cons = sw_comp_cons;
725         fp->rx_comp_prod = sw_comp_prod;
726
727         /* Update producers */
728         bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
729                              fp->rx_sge_prod);
730
731         fp->rx_pkt += rx_pkt;
732         fp->rx_calls++;
733
734         return rx_pkt;
735 }
736
737 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
738 {
739         struct bnx2x_fastpath *fp = fp_cookie;
740         struct bnx2x *bp = fp->bp;
741
742         DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB "
743                          "[fp %d fw_sd %d igusb %d]\n",
744            fp->index, fp->fw_sb_id, fp->igu_sb_id);
745         bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
746
747 #ifdef BNX2X_STOP_ON_ERROR
748         if (unlikely(bp->panic))
749                 return IRQ_HANDLED;
750 #endif
751
752         /* Handle Rx and Tx according to MSI-X vector */
753         prefetch(fp->rx_cons_sb);
754         prefetch(fp->tx_cons_sb);
755         prefetch(&fp->sb_running_index[SM_RX_ID]);
756         napi_schedule(&bnx2x_fp(bp, fp->index, napi));
757
758         return IRQ_HANDLED;
759 }
760
761 /* HW Lock for shared dual port PHYs */
762 void bnx2x_acquire_phy_lock(struct bnx2x *bp)
763 {
764         mutex_lock(&bp->port.phy_mutex);
765
766         if (bp->port.need_hw_lock)
767                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
768 }
769
770 void bnx2x_release_phy_lock(struct bnx2x *bp)
771 {
772         if (bp->port.need_hw_lock)
773                 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
774
775         mutex_unlock(&bp->port.phy_mutex);
776 }
777
778 /* calculates MF speed according to current line speed and MF configuration */
779 u16 bnx2x_get_mf_speed(struct bnx2x *bp)
780 {
781         u16 line_speed = bp->link_vars.line_speed;
782         if (IS_MF(bp)) {
783                 u16 maxCfg = bnx2x_extract_max_cfg(bp,
784                                                    bp->mf_config[BP_VN(bp)]);
785
786                 /* Calculate the current MAX line speed limit for the MF
787                  * devices
788                  */
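                /* In SI mode maxCfg is a percentage of the line speed;
                 * in SD mode it is the maximum rate in 100 Mbps units.
                 */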
789                 if (IS_MF_SI(bp))
790                         line_speed = (line_speed * maxCfg) / 100;
791                 else { /* SD mode */
792                         u16 vn_max_rate = maxCfg * 100;
793
794                         if (vn_max_rate < line_speed)
795                                 line_speed = vn_max_rate;
796                 }
797         }
798
799         return line_speed;
800 }
801
802 /**
803  * bnx2x_fill_report_data - fill link report data to report
804  *
805  * @bp:         driver handle
806  * @data:       link state to update
807  *
808  * It uses non-atomic bit operations because it is called under the mutex.
809  */
810 static inline void bnx2x_fill_report_data(struct bnx2x *bp,
811                                           struct bnx2x_link_report_data *data)
812 {
813         u16 line_speed = bnx2x_get_mf_speed(bp);
814
815         memset(data, 0, sizeof(*data));
816
817         /* Fill the report data: effective line speed */
818         data->line_speed = line_speed;
819
820         /* Link is down */
821         if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
822                 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
823                           &data->link_report_flags);
824
825         /* Full DUPLEX */
826         if (bp->link_vars.duplex == DUPLEX_FULL)
827                 __set_bit(BNX2X_LINK_REPORT_FD, &data->link_report_flags);
828
829         /* Rx Flow Control is ON */
830         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
831                 __set_bit(BNX2X_LINK_REPORT_RX_FC_ON, &data->link_report_flags);
832
833         /* Tx Flow Control is ON */
834         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
835                 __set_bit(BNX2X_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
836 }
837
838 /**
839  * bnx2x_link_report - report link status to OS.
840  *
841  * @bp:         driver handle
842  *
843  * Calls __bnx2x_link_report() under the same locking scheme
844  * as the link/PHY state management code to ensure consistent
845  * link reporting.
846  */
847
848 void bnx2x_link_report(struct bnx2x *bp)
849 {
850         bnx2x_acquire_phy_lock(bp);
851         __bnx2x_link_report(bp);
852         bnx2x_release_phy_lock(bp);
853 }
854
855 /**
856  * __bnx2x_link_report - report link status to OS.
857  *
858  * @bp:         driver handle
859  *
860  * Non-atomic implementation.
861  * Should be called under the phy_lock.
862  */
863 void __bnx2x_link_report(struct bnx2x *bp)
864 {
865         struct bnx2x_link_report_data cur_data;
866
867         /* reread mf_cfg */
868         if (!CHIP_IS_E1(bp))
869                 bnx2x_read_mf_cfg(bp);
870
871         /* Read the current link report info */
872         bnx2x_fill_report_data(bp, &cur_data);
873
874         /* Don't report link down or exactly the same link status twice */
875         if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
876             (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
877                       &bp->last_reported_link.link_report_flags) &&
878              test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
879                       &cur_data.link_report_flags)))
880                 return;
881
882         bp->link_cnt++;
883
884         /* We are going to report new link parameters now -
885          * remember the current data for next time.
886          */
887         memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));
888
889         if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
890                      &cur_data.link_report_flags)) {
891                 netif_carrier_off(bp->dev);
892                 netdev_err(bp->dev, "NIC Link is Down\n");
893                 return;
894         } else {
895                 netif_carrier_on(bp->dev);
896                 netdev_info(bp->dev, "NIC Link is Up, ");
897                 pr_cont("%d Mbps ", cur_data.line_speed);
898
899                 if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
900                                        &cur_data.link_report_flags))
901                         pr_cont("full duplex");
902                 else
903                         pr_cont("half duplex");
904
905                 /* Handle the FC at the end so that only these flags could
906                  * possibly be set. This way we can easily check whether any
907                  * FC is enabled.
908                  */
909                 if (cur_data.link_report_flags) {
910                         if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
911                                      &cur_data.link_report_flags)) {
912                                 pr_cont(", receive ");
913                                 if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
914                                      &cur_data.link_report_flags))
915                                         pr_cont("& transmit ");
916                         } else {
917                                 pr_cont(", transmit ");
918                         }
919                         pr_cont("flow control ON");
920                 }
921                 pr_cont("\n");
922         }
923 }
924
925 void bnx2x_init_rx_rings(struct bnx2x *bp)
926 {
927         int func = BP_FUNC(bp);
928         int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
929                                               ETH_MAX_AGGREGATION_QUEUES_E1H;
930         u16 ring_prod;
931         int i, j;
932
933         /* Allocate TPA resources */
934         for_each_rx_queue(bp, j) {
935                 struct bnx2x_fastpath *fp = &bp->fp[j];
936
937                 DP(NETIF_MSG_IFUP,
938                    "mtu %d  rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
939
940                 if (!fp->disable_tpa) {
941                         /* Fill the per-aggregation pool */
942                         for (i = 0; i < max_agg_queues; i++) {
943                                 fp->tpa_pool[i].skb =
944                                    netdev_alloc_skb(bp->dev, fp->rx_buf_size);
945                                 if (!fp->tpa_pool[i].skb) {
946                                         BNX2X_ERR("Failed to allocate TPA "
947                                                   "skb pool for queue[%d] - "
948                                                   "disabling TPA on this "
949                                                   "queue!\n", j);
950                                         bnx2x_free_tpa_pool(bp, fp, i);
951                                         fp->disable_tpa = 1;
952                                         break;
953                                 }
954                                 dma_unmap_addr_set((struct sw_rx_bd *)
955                                                         &bp->fp->tpa_pool[i],
956                                                    mapping, 0);
957                                 fp->tpa_state[i] = BNX2X_TPA_STOP;
958                         }
959
960                         /* "next page" elements initialization */
961                         bnx2x_set_next_page_sgl(fp);
962
963                         /* set SGEs bit mask */
964                         bnx2x_init_sge_ring_bit_mask(fp);
965
966                         /* Allocate SGEs and initialize the ring elements */
967                         for (i = 0, ring_prod = 0;
968                              i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
969
970                                 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
971                                         BNX2X_ERR("was only able to allocate "
972                                                   "%d rx sges\n", i);
973                                         BNX2X_ERR("disabling TPA for"
974                                                   " queue[%d]\n", j);
975                                         /* Cleanup already allocated elements */
976                                         bnx2x_free_rx_sge_range(bp,
977                                                                 fp, ring_prod);
978                                         bnx2x_free_tpa_pool(bp,
979                                                             fp, max_agg_queues);
980                                         fp->disable_tpa = 1;
981                                         ring_prod = 0;
982                                         break;
983                                 }
984                                 ring_prod = NEXT_SGE_IDX(ring_prod);
985                         }
986
987                         fp->rx_sge_prod = ring_prod;
988                 }
989         }
990
991         for_each_rx_queue(bp, j) {
992                 struct bnx2x_fastpath *fp = &bp->fp[j];
993
994                 fp->rx_bd_cons = 0;
995
996                 /* Activate BD ring */
997                 /* Warning!
998                  * This will generate an interrupt (to the TSTORM);
999                  * it must only be done after the chip is initialized.
1000                  */
1001                 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1002                                      fp->rx_sge_prod);
1003
1004                 if (j != 0)
1005                         continue;
1006
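                /* The USTORM memory workaround address is programmed only
                 * once (for queue 0) and only on non-E2 chips.
                 */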
1007                 if (!CHIP_IS_E2(bp)) {
1008                         REG_WR(bp, BAR_USTRORM_INTMEM +
1009                                USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
1010                                U64_LO(fp->rx_comp_mapping));
1011                         REG_WR(bp, BAR_USTRORM_INTMEM +
1012                                USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
1013                                U64_HI(fp->rx_comp_mapping));
1014                 }
1015         }
1016 }
1017
1018 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
1019 {
1020         int i;
1021
1022         for_each_tx_queue(bp, i) {
1023                 struct bnx2x_fastpath *fp = &bp->fp[i];
1024
1025                 u16 bd_cons = fp->tx_bd_cons;
1026                 u16 sw_prod = fp->tx_pkt_prod;
1027                 u16 sw_cons = fp->tx_pkt_cons;
1028
1029                 while (sw_cons != sw_prod) {
1030                         bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
1031                         sw_cons++;
1032                 }
1033         }
1034 }
1035
1036 static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
1037 {
1038         struct bnx2x *bp = fp->bp;
1039         int i;
1040
1041         /* ring wasn't allocated */
1042         if (fp->rx_buf_ring == NULL)
1043                 return;
1044
1045         for (i = 0; i < NUM_RX_BD; i++) {
1046                 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
1047                 struct sk_buff *skb = rx_buf->skb;
1048
1049                 if (skb == NULL)
1050                         continue;
1051
1052                 dma_unmap_single(&bp->pdev->dev,
1053                                  dma_unmap_addr(rx_buf, mapping),
1054                                  fp->rx_buf_size, DMA_FROM_DEVICE);
1055
1056                 rx_buf->skb = NULL;
1057                 dev_kfree_skb(skb);
1058         }
1059 }
1060
1061 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1062 {
1063         int j;
1064
1065         for_each_rx_queue(bp, j) {
1066                 struct bnx2x_fastpath *fp = &bp->fp[j];
1067
1068                 bnx2x_free_rx_bds(fp);
1069
1070                 if (!fp->disable_tpa)
1071                         bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
1072                                             ETH_MAX_AGGREGATION_QUEUES_E1 :
1073                                             ETH_MAX_AGGREGATION_QUEUES_E1H);
1074         }
1075 }
1076
1077 void bnx2x_free_skbs(struct bnx2x *bp)
1078 {
1079         bnx2x_free_tx_skbs(bp);
1080         bnx2x_free_rx_skbs(bp);
1081 }
1082
1083 void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
1084 {
1085         /* load old values */
1086         u32 mf_cfg = bp->mf_config[BP_VN(bp)];
1087
1088         if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
1089                 /* leave all but MAX value */
1090                 mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
1091
1092                 /* set new MAX value */
1093                 mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
1094                                 & FUNC_MF_CFG_MAX_BW_MASK;
1095
1096                 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
1097         }
1098 }
1099
1100 static void bnx2x_free_msix_irqs(struct bnx2x *bp)
1101 {
1102         int i, offset = 1;
1103
1104         free_irq(bp->msix_table[0].vector, bp->dev);
1105         DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
1106            bp->msix_table[0].vector);
1107
1108 #ifdef BCM_CNIC
1109         offset++;
1110 #endif
1111         for_each_eth_queue(bp, i) {
1112                 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq  "
1113                    "state %x\n", i, bp->msix_table[i + offset].vector,
1114                    bnx2x_fp(bp, i, state));
1115
1116                 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
1117         }
1118 }
1119
1120 void bnx2x_free_irq(struct bnx2x *bp)
1121 {
1122         if (bp->flags & USING_MSIX_FLAG)
1123                 bnx2x_free_msix_irqs(bp);
1124         else if (bp->flags & USING_MSI_FLAG)
1125                 free_irq(bp->pdev->irq, bp->dev);
1126         else
1127                 free_irq(bp->pdev->irq, bp->dev);
1128 }
1129
1130 int bnx2x_enable_msix(struct bnx2x *bp)
1131 {
1132         int msix_vec = 0, i, rc, req_cnt;
1133
1134         bp->msix_table[msix_vec].entry = msix_vec;
1135         DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n",
1136            bp->msix_table[0].entry);
1137         msix_vec++;
1138
1139 #ifdef BCM_CNIC
1140         bp->msix_table[msix_vec].entry = msix_vec;
1141         DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d (CNIC)\n",
1142            bp->msix_table[msix_vec].entry, bp->msix_table[msix_vec].entry);
1143         msix_vec++;
1144 #endif
1145         for_each_eth_queue(bp, i) {
1146                 bp->msix_table[msix_vec].entry = msix_vec;
1147                 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
1148                    "(fastpath #%u)\n", msix_vec, msix_vec, i);
1149                 msix_vec++;
1150         }
1151
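        /* one vector for the slowpath, one per ETH queue and, when CNIC is
         * compiled in, one more for it
         */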
1152         req_cnt = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_CONTEXT_USE + 1;
1153
1154         rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], req_cnt);
1155
1156         /*
1157          * reconfigure number of tx/rx queues according to available
1158          * MSI-X vectors
1159          */
1160         if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
1161                 /* how many fewer vectors will we have? */
1162                 int diff = req_cnt - rc;
1163
1164                 DP(NETIF_MSG_IFUP,
1165                    "Trying to use less MSI-X vectors: %d\n", rc);
1166
1167                 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
1168
1169                 if (rc) {
1170                         DP(NETIF_MSG_IFUP,
1171                            "MSI-X is not attainable  rc %d\n", rc);
1172                         return rc;
1173                 }
1174                 /*
1175                  * decrease number of queues by number of unallocated entries
1176                  */
1177                 bp->num_queues -= diff;
1178
1179                 DP(NETIF_MSG_IFUP, "New queue configuration set: %d\n",
1180                                   bp->num_queues);
1181         } else if (rc) {
1182                 /* fall back to INTx if not enough memory */
1183                 if (rc == -ENOMEM)
1184                         bp->flags |= DISABLE_MSI_FLAG;
1185                 DP(NETIF_MSG_IFUP, "MSI-X is not attainable  rc %d\n", rc);
1186                 return rc;
1187         }
1188
1189         bp->flags |= USING_MSIX_FLAG;
1190
1191         return 0;
1192 }
1193
1194 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1195 {
1196         int i, rc, offset = 1;
1197
1198         rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
1199                          bp->dev->name, bp->dev);
1200         if (rc) {
1201                 BNX2X_ERR("request sp irq failed\n");
1202                 return -EBUSY;
1203         }
1204
1205 #ifdef BCM_CNIC
1206         offset++;
1207 #endif
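        /* Fastpath vectors follow the slowpath vector (and the CNIC vector
         * when BCM_CNIC is defined).
         */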
1208         for_each_eth_queue(bp, i) {
1209                 struct bnx2x_fastpath *fp = &bp->fp[i];
1210                 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1211                          bp->dev->name, i);
1212
1213                 rc = request_irq(bp->msix_table[offset].vector,
1214                                  bnx2x_msix_fp_int, 0, fp->name, fp);
1215                 if (rc) {
1216                         BNX2X_ERR("request fp #%d irq failed  rc %d\n", i, rc);
1217                         bnx2x_free_msix_irqs(bp);
1218                         return -EBUSY;
1219                 }
1220
1221                 offset++;
1222                 fp->state = BNX2X_FP_STATE_IRQ;
1223         }
1224
1225         i = BNX2X_NUM_ETH_QUEUES(bp);
1226         offset = 1 + CNIC_CONTEXT_USE;
1227         netdev_info(bp->dev, "using MSI-X  IRQs: sp %d  fp[%d] %d"
1228                " ... fp[%d] %d\n",
1229                bp->msix_table[0].vector,
1230                0, bp->msix_table[offset].vector,
1231                i - 1, bp->msix_table[offset + i - 1].vector);
1232
1233         return 0;
1234 }
1235
1236 int bnx2x_enable_msi(struct bnx2x *bp)
1237 {
1238         int rc;
1239
1240         rc = pci_enable_msi(bp->pdev);
1241         if (rc) {
1242                 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
1243                 return -1;
1244         }
1245         bp->flags |= USING_MSI_FLAG;
1246
1247         return 0;
1248 }
1249
1250 static int bnx2x_req_irq(struct bnx2x *bp)
1251 {
1252         unsigned long flags;
1253         int rc;
1254
1255         if (bp->flags & USING_MSI_FLAG)
1256                 flags = 0;
1257         else
1258                 flags = IRQF_SHARED;
1259
1260         rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
1261                          bp->dev->name, bp->dev);
1262         if (!rc)
1263                 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
1264
1265         return rc;
1266 }
1267
1268 static void bnx2x_napi_enable(struct bnx2x *bp)
1269 {
1270         int i;
1271
1272         for_each_napi_queue(bp, i)
1273                 napi_enable(&bnx2x_fp(bp, i, napi));
1274 }
1275
1276 static void bnx2x_napi_disable(struct bnx2x *bp)
1277 {
1278         int i;
1279
1280         for_each_napi_queue(bp, i)
1281                 napi_disable(&bnx2x_fp(bp, i, napi));
1282 }
1283
1284 void bnx2x_netif_start(struct bnx2x *bp)
1285 {
1286         if (netif_running(bp->dev)) {
1287                 bnx2x_napi_enable(bp);
1288                 bnx2x_int_enable(bp);
1289                 if (bp->state == BNX2X_STATE_OPEN)
1290                         netif_tx_wake_all_queues(bp->dev);
1291         }
1292 }
1293
1294 void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1295 {
1296         bnx2x_int_disable_sync(bp, disable_hw);
1297         bnx2x_napi_disable(bp);
1298         netif_tx_disable(bp->dev);
1299 }
1300
1301 u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
1302 {
1303 #ifdef BCM_CNIC
1304         struct bnx2x *bp = netdev_priv(dev);
1305         if (NO_FCOE(bp))
1306                 return skb_tx_hash(dev, skb);
1307         else {
1308                 struct ethhdr *hdr = (struct ethhdr *)skb->data;
1309                 u16 ether_type = ntohs(hdr->h_proto);
1310
1311                 /* Skip VLAN tag if present */
1312                 if (ether_type == ETH_P_8021Q) {
1313                         struct vlan_ethhdr *vhdr =
1314                                 (struct vlan_ethhdr *)skb->data;
1315
1316                         ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1317                 }
1318
1319                 /* If ethertype is FCoE or FIP - use FCoE ring */
1320                 if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
1321                         return bnx2x_fcoe(bp, index);
1322         }
1323 #endif
1324         /* Select a non-FCoE queue: if FCoE is enabled, exclude the FCoE L2 ring
1325          */
1326         return __skb_tx_hash(dev, skb,
1327                         dev->real_num_tx_queues - FCOE_CONTEXT_USE);
1328 }
1329
1330 void bnx2x_set_num_queues(struct bnx2x *bp)
1331 {
1332         switch (bp->multi_mode) {
1333         case ETH_RSS_MODE_DISABLED:
1334                 bp->num_queues = 1;
1335                 break;
1336         case ETH_RSS_MODE_REGULAR:
1337                 bp->num_queues = bnx2x_calc_num_queues(bp);
1338                 break;
1339
1340         default:
1341                 bp->num_queues = 1;
1342                 break;
1343         }
1344
1345         /* Add special queues */
1346         bp->num_queues += NONE_ETH_CONTEXT_USE;
1347 }
1348
1349 #ifdef BCM_CNIC
1350 static inline void bnx2x_set_fcoe_eth_macs(struct bnx2x *bp)
1351 {
1352         if (!NO_FCOE(bp)) {
1353                 if (!IS_MF_SD(bp))
1354                         bnx2x_set_fip_eth_mac_addr(bp, 1);
1355                 bnx2x_set_all_enode_macs(bp, 1);
1356                 bp->flags |= FCOE_MACS_SET;
1357         }
1358 }
1359 #endif
1360
1361 static void bnx2x_release_firmware(struct bnx2x *bp)
1362 {
1363         kfree(bp->init_ops_offsets);
1364         kfree(bp->init_ops);
1365         kfree(bp->init_data);
1366         release_firmware(bp->firmware);
1367 }
1368
1369 static inline int bnx2x_set_real_num_queues(struct bnx2x *bp)
1370 {
1371         int rc, num = bp->num_queues;
1372
1373 #ifdef BCM_CNIC
1374         if (NO_FCOE(bp))
1375                 num -= FCOE_CONTEXT_USE;
1376
1377 #endif
1378         netif_set_real_num_tx_queues(bp->dev, num);
1379         rc = netif_set_real_num_rx_queues(bp->dev, num);
1380         return rc;
1381 }
1382
1383 static inline void bnx2x_set_rx_buf_size(struct bnx2x *bp)
1384 {
1385         int i;
1386
1387         for_each_queue(bp, i) {
1388                 struct bnx2x_fastpath *fp = &bp->fp[i];
1389
1390                 /* Always use a mini-jumbo MTU for the FCoE L2 ring */
1391                 if (IS_FCOE_IDX(i))
1392                         /*
1393                          * Although no IP frames are expected to arrive on
1394                          * this ring, we still want to add an
1395                          * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
1396                          * overrun attack.
1397                          */
1398                         fp->rx_buf_size =
1399                                 BNX2X_FCOE_MINI_JUMBO_MTU + ETH_OVREHEAD +
1400                                 BNX2X_RX_ALIGN + IP_HEADER_ALIGNMENT_PADDING;
1401                 else
1402                         fp->rx_buf_size =
1403                                 bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN +
1404                                 IP_HEADER_ALIGNMENT_PADDING;
1405         }
1406 }
1407
1408 /* must be called with rtnl_lock */
1409 int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
1410 {
1411         u32 load_code;
1412         int i, rc;
1413
1414         /* Set init arrays */
1415         rc = bnx2x_init_firmware(bp);
1416         if (rc) {
1417                 BNX2X_ERR("Error loading firmware\n");
1418                 return rc;
1419         }
1420
1421 #ifdef BNX2X_STOP_ON_ERROR
1422         if (unlikely(bp->panic))
1423                 return -EPERM;
1424 #endif
1425
1426         bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
1427
1428         /* Set the initial link reported state to link down */
1429         bnx2x_acquire_phy_lock(bp);
1430         memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
1431         __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1432                 &bp->last_reported_link.link_report_flags);
1433         bnx2x_release_phy_lock(bp);
1434
1435         /* must be called before memory allocation and HW init */
1436         bnx2x_ilt_set_info(bp);
1437
1438         /* zero fastpath structures preserving invariants like napi which are
1439          * allocated only once
1440          */
1441         for_each_queue(bp, i)
1442                 bnx2x_bz_fp(bp, i);
1443
1444         /* Set the receive queues buffer size */
1445         bnx2x_set_rx_buf_size(bp);
1446
1447         for_each_queue(bp, i)
1448                 bnx2x_fp(bp, i, disable_tpa) =
1449                                         ((bp->flags & TPA_ENABLE_FLAG) == 0);
1450
1451 #ifdef BCM_CNIC
1452         /* We don't want TPA on FCoE L2 ring */
1453         bnx2x_fcoe(bp, disable_tpa) = 1;
1454 #endif
1455
1456         if (bnx2x_alloc_mem(bp))
1457                 return -ENOMEM;
1458
1459         /* Since bnx2x_alloc_mem() may update bp->num_queues,
1460          * bnx2x_set_real_num_queues() should always
1461          * come after it.
1462          */
1463         rc = bnx2x_set_real_num_queues(bp);
1464         if (rc) {
1465                 BNX2X_ERR("Unable to set real_num_queues\n");
1466                 goto load_error0;
1467         }
1468
1469         bnx2x_napi_enable(bp);
1470
1471         /* Send the LOAD_REQUEST command to the MCP.
1472          * The response indicates the type of LOAD command:
1473          * if this is the first port to be initialized,
1474          * the common blocks should be initialized as well; otherwise not.
1475          */
1476         if (!BP_NOMCP(bp)) {
1477                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0);
1478                 if (!load_code) {
1479                         BNX2X_ERR("MCP response failure, aborting\n");
1480                         rc = -EBUSY;
1481                         goto load_error1;
1482                 }
1483                 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
1484                         rc = -EBUSY; /* other port in diagnostic mode */
1485                         goto load_error1;
1486                 }
1487
1488         } else {
1489                 int path = BP_PATH(bp);
1490                 int port = BP_PORT(bp);
1491
1492                 DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d]      %d, %d, %d\n",
1493                    path, load_count[path][0], load_count[path][1],
1494                    load_count[path][2]);
1495                 load_count[path][0]++;
1496                 load_count[path][1 + port]++;
1497                 DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d]  %d, %d, %d\n",
1498                    path, load_count[path][0], load_count[path][1],
1499                    load_count[path][2]);
1500                 if (load_count[path][0] == 1)
1501                         load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
1502                 else if (load_count[path][1 + port] == 1)
1503                         load_code = FW_MSG_CODE_DRV_LOAD_PORT;
1504                 else
1505                         load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
1506         }
1507
1508         if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
1509             (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
1510             (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
1511                 bp->port.pmf = 1;
1512         else
1513                 bp->port.pmf = 0;
1514         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
1515
1516         /* Initialize HW */
1517         rc = bnx2x_init_hw(bp, load_code);
1518         if (rc) {
1519                 BNX2X_ERR("HW init failed, aborting\n");
1520                 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
1521                 goto load_error2;
1522         }
1523
1524         /* Connect to IRQs */
1525         rc = bnx2x_setup_irqs(bp);
1526         if (rc) {
1527                 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
1528                 goto load_error2;
1529         }
1530
1531         /* Setup NIC internals and enable interrupts */
1532         bnx2x_nic_init(bp, load_code);
1533
1534         if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
1535             (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
1536             (bp->common.shmem2_base))
1537                 SHMEM2_WR(bp, dcc_support,
1538                           (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
1539                            SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
1540
1541         /* Send LOAD_DONE command to MCP */
1542         if (!BP_NOMCP(bp)) {
1543                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
1544                 if (!load_code) {
1545                         BNX2X_ERR("MCP response failure, aborting\n");
1546                         rc = -EBUSY;
1547                         goto load_error3;
1548                 }
1549         }
1550
1551         bnx2x_dcbx_init(bp);
1552
1553         bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
1554
1555         rc = bnx2x_func_start(bp);
1556         if (rc) {
1557                 BNX2X_ERR("Function start failed!\n");
1558 #ifndef BNX2X_STOP_ON_ERROR
1559                 goto load_error3;
1560 #else
1561                 bp->panic = 1;
1562                 return -EBUSY;
1563 #endif
1564         }
1565
1566         rc = bnx2x_setup_client(bp, &bp->fp[0], 1 /* Leading */);
1567         if (rc) {
1568                 BNX2X_ERR("Setup leading failed!\n");
1569 #ifndef BNX2X_STOP_ON_ERROR
1570                 goto load_error3;
1571 #else
1572                 bp->panic = 1;
1573                 return -EBUSY;
1574 #endif
1575         }
1576
1577         if (!CHIP_IS_E1(bp) &&
1578             (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED)) {
1579                 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
1580                 bp->flags |= MF_FUNC_DIS;
1581         }
1582
1583 #ifdef BCM_CNIC
1584         /* Enable Timer scan */
1585         REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
1586 #endif
1587
1588         for_each_nondefault_queue(bp, i) {
1589                 rc = bnx2x_setup_client(bp, &bp->fp[i], 0);
1590                 if (rc)
1591 #ifdef BCM_CNIC
1592                         goto load_error4;
1593 #else
1594                         goto load_error3;
1595 #endif
1596         }
1597
1598         /* Now that the clients are configured we are ready to work */
1599         bp->state = BNX2X_STATE_OPEN;
1600
1601 #ifdef BCM_CNIC
1602         bnx2x_set_fcoe_eth_macs(bp);
1603 #endif
1604
1605         bnx2x_set_eth_mac(bp, 1);
1606
1607         /* Clear MC configuration */
1608         if (CHIP_IS_E1(bp))
1609                 bnx2x_invalidate_e1_mc_list(bp);
1610         else
1611                 bnx2x_invalidate_e1h_mc_list(bp);
1612
1613         /* Clear UC lists configuration */
1614         bnx2x_invalidate_uc_list(bp);
1615
1616         if (bp->pending_max) {
1617                 bnx2x_update_max_mf_config(bp, bp->pending_max);
1618                 bp->pending_max = 0;
1619         }
1620
1621         if (bp->port.pmf)
1622                 bnx2x_initial_phy_init(bp, load_mode);
1623
1624         /* Initialize Rx filtering */
1625         bnx2x_set_rx_mode(bp->dev);
1626
1627         /* Start fast path */
1628         switch (load_mode) {
1629         case LOAD_NORMAL:
1630                 /* Tx queues should only be re-enabled */
1631                 netif_tx_wake_all_queues(bp->dev);
1632                 /* Initialize the receive filter. */
1633                 break;
1634
1635         case LOAD_OPEN:
1636                 netif_tx_start_all_queues(bp->dev);
1637                 smp_mb__after_clear_bit();
1638                 break;
1639
1640         case LOAD_DIAG:
1641                 bp->state = BNX2X_STATE_DIAG;
1642                 break;
1643
1644         default:
1645                 break;
1646         }
1647
1648         if (!bp->port.pmf)
1649                 bnx2x__link_status_update(bp);
1650
1651         /* start the timer */
1652         mod_timer(&bp->timer, jiffies + bp->current_interval);
1653
1654 #ifdef BCM_CNIC
1655         bnx2x_setup_cnic_irq_info(bp);
1656         if (bp->state == BNX2X_STATE_OPEN)
1657                 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
1658 #endif
1659         bnx2x_inc_load_cnt(bp);
1660
1661         bnx2x_release_firmware(bp);
1662
1663         return 0;
1664
1665 #ifdef BCM_CNIC
1666 load_error4:
1667         /* Disable Timer scan */
1668         REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0);
1669 #endif
1670 load_error3:
1671         bnx2x_int_disable_sync(bp, 1);
1672
1673         /* Free SKBs, SGEs, TPA pool and driver internals */
1674         bnx2x_free_skbs(bp);
1675         for_each_rx_queue(bp, i)
1676                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
1677
1678         /* Release IRQs */
1679         bnx2x_free_irq(bp);
1680 load_error2:
1681         if (!BP_NOMCP(bp)) {
1682                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
1683                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
1684         }
1685
1686         bp->port.pmf = 0;
1687 load_error1:
1688         bnx2x_napi_disable(bp);
1689 load_error0:
1690         bnx2x_free_mem(bp);
1691
1692         bnx2x_release_firmware(bp);
1693
1694         return rc;
1695 }
1696
1697 /* must be called with rtnl_lock */
1698 int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
1699 {
1700         int i;
1701
1702         if (bp->state == BNX2X_STATE_CLOSED) {
1703                 /* Interface has been removed - nothing to recover */
1704                 bp->recovery_state = BNX2X_RECOVERY_DONE;
1705                 bp->is_leader = 0;
1706                 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
1707                 smp_wmb();
1708
1709                 return -EINVAL;
1710         }
1711
1712 #ifdef BCM_CNIC
1713         bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
1714 #endif
1715         bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
1716
1717         /* Set "drop all" */
1718         bp->rx_mode = BNX2X_RX_MODE_NONE;
1719         bnx2x_set_storm_rx_mode(bp);
1720
1721         /* Stop Tx */
1722         bnx2x_tx_disable(bp);
1723
1724         del_timer_sync(&bp->timer);
1725
1726         SHMEM_WR(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb,
1727                  (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
1728
1729         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
1730
1731         /* Cleanup the chip if needed */
1732         if (unload_mode != UNLOAD_RECOVERY)
1733                 bnx2x_chip_cleanup(bp, unload_mode);
1734         else {
1735                 /* Disable HW interrupts, NAPI and Tx */
1736                 bnx2x_netif_stop(bp, 1);
1737
1738                 /* Release IRQs */
1739                 bnx2x_free_irq(bp);
1740         }
1741
1742         bp->port.pmf = 0;
1743
1744         /* Free SKBs, SGEs, TPA pool and driver internals */
1745         bnx2x_free_skbs(bp);
1746         for_each_rx_queue(bp, i)
1747                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
1748
1749         bnx2x_free_mem(bp);
1750
1751         bp->state = BNX2X_STATE_CLOSED;
1752
1753         /* The last driver must disable the "close the gate" functionality if
1754          * there is no parity attention or "process kill" pending.
1755          */
1756         if ((!bnx2x_dec_load_cnt(bp)) && (!bnx2x_chk_parity_attn(bp)) &&
1757             bnx2x_reset_is_done(bp))
1758                 bnx2x_disable_close_the_gate(bp);
1759
1760         /* Reset the MCP mailbox sequence if a recovery is ongoing */
1761         if (unload_mode == UNLOAD_RECOVERY)
1762                 bp->fw_seq = 0;
1763
1764         return 0;
1765 }
1766
1767 int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
1768 {
1769         u16 pmcsr;
1770
1771         /* If there is no power capability, silently succeed */
1772         if (!bp->pm_cap) {
1773                 DP(NETIF_MSG_HW, "No power capability. Breaking.\n");
1774                 return 0;
1775         }
1776
1777         pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
1778
1779         switch (state) {
1780         case PCI_D0:
1781                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
1782                                       ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
1783                                        PCI_PM_CTRL_PME_STATUS));
1784
1785                 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
1786                         /* delay required during transition out of D3hot */
1787                         msleep(20);
1788                 break;
1789
1790         case PCI_D3hot:
1791                 /* If there are other clients above, don't
1792                    shut down the power */
1793                 if (atomic_read(&bp->pdev->enable_cnt) != 1)
1794                         return 0;
1795                 /* Don't shut down the power for emulation and FPGA */
1796                 if (CHIP_REV_IS_SLOW(bp))
1797                         return 0;
1798
1799                 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
1800                 pmcsr |= 3;
1801
1802                 if (bp->wol)
1803                         pmcsr |= PCI_PM_CTRL_PME_ENABLE;
1804
1805                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
1806                                       pmcsr);
1807
1808                 /* No more memory access after this point until
1809                  * the device is brought back to D0.
1810                  */
1811                 break;
1812
1813         default:
1814                 return -EINVAL;
1815         }
1816         return 0;
1817 }
1818
1819 /*
1820  * net_device service functions
1821  */
1822 int bnx2x_poll(struct napi_struct *napi, int budget)
1823 {
1824         int work_done = 0;
1825         struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
1826                                                  napi);
1827         struct bnx2x *bp = fp->bp;
1828
1829         while (1) {
1830 #ifdef BNX2X_STOP_ON_ERROR
1831                 if (unlikely(bp->panic)) {
1832                         napi_complete(napi);
1833                         return 0;
1834                 }
1835 #endif
1836
1837                 if (bnx2x_has_tx_work(fp))
1838                         bnx2x_tx_int(fp);
1839
1840                 if (bnx2x_has_rx_work(fp)) {
1841                         work_done += bnx2x_rx_int(fp, budget - work_done);
1842
1843                         /* must not complete if we consumed full budget */
1844                         if (work_done >= budget)
1845                                 break;
1846                 }
1847
1848                 /* Fall out from the NAPI loop if needed */
1849                 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
1850 #ifdef BCM_CNIC
1851                         /* No need to update SB for FCoE L2 ring as long as
1852                          * it's connected to the default SB and the SB
1853                          * has been updated when NAPI was scheduled.
1854                          */
1855                         if (IS_FCOE_FP(fp)) {
1856                                 napi_complete(napi);
1857                                 break;
1858                         }
1859 #endif
1860
1861                         bnx2x_update_fpsb_idx(fp);
1862                         /* bnx2x_has_rx_work() reads the status block,
1863                          * thus we need to ensure that status block indices
1864                          * have been actually read (bnx2x_update_fpsb_idx)
1865                          * prior to this check (bnx2x_has_rx_work) so that
1866                          * we won't write the "newer" value of the status block
1867                          * to IGU (if there was a DMA right after
1868                          * bnx2x_has_rx_work and if there is no rmb, the memory
1869                          * reading (bnx2x_update_fpsb_idx) may be postponed
1870                          * to right before bnx2x_ack_sb). In this case there
1871                          * will never be another interrupt until there is
1872                          * another update of the status block, while there
1873                          * is still unhandled work.
1874                          */
1875                         rmb();
1876
1877                         if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
1878                                 napi_complete(napi);
1879                                 /* Re-enable interrupts */
1880                                 DP(NETIF_MSG_HW,
1881                                    "Update index to %d\n", fp->fp_hc_idx);
1882                                 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
1883                                              le16_to_cpu(fp->fp_hc_idx),
1884                                              IGU_INT_ENABLE, 1);
1885                                 break;
1886                         }
1887                 }
1888         }
1889
1890         return work_done;
1891 }
1892
1893 /* We split the first BD into a headers BD and a data BD
1894  * to ease the pain of our fellow microcode engineers;
1895  * we use one mapping for both BDs.
1896  * So far this has only been observed to happen
1897  * in Other Operating Systems(TM).
1898  */
1899 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
1900                                    struct bnx2x_fastpath *fp,
1901                                    struct sw_tx_bd *tx_buf,
1902                                    struct eth_tx_start_bd **tx_bd, u16 hlen,
1903                                    u16 bd_prod, int nbd)
1904 {
1905         struct eth_tx_start_bd *h_tx_bd = *tx_bd;
1906         struct eth_tx_bd *d_tx_bd;
1907         dma_addr_t mapping;
1908         int old_len = le16_to_cpu(h_tx_bd->nbytes);
1909
1910         /* first fix first BD */
1911         h_tx_bd->nbd = cpu_to_le16(nbd);
1912         h_tx_bd->nbytes = cpu_to_le16(hlen);
1913
1914         DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
1915            "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
1916            h_tx_bd->addr_lo, h_tx_bd->nbd);
1917
1918         /* now get a new data BD
1919          * (after the pbd) and fill it */
1920         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
1921         d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
1922
1923         mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
1924                            le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
1925
1926         d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1927         d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1928         d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
1929
1930         /* this marks the BD as one that has no individual mapping */
1931         tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
1932
1933         DP(NETIF_MSG_TX_QUEUED,
1934            "TSO split data size is %d (%x:%x)\n",
1935            d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
1936
1937         /* update tx_bd */
1938         *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
1939
1940         return bd_prod;
1941 }
1942
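/**
 * bnx2x_csum_fix - adjust a checksum computed from a shifted start point.
 *
 * @t_header:   pointer to the transport header
 * @csum:       checksum provided by the stack
 * @fix:        signed distance (in bytes) between the stack's checksum start
 *              and the transport header
 *
 * Compensates the checksum for the @fix byte offset and returns the result
 * byte-swapped for the parse BD.
 */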
1943 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
1944 {
1945         if (fix > 0)
1946                 csum = (u16) ~csum_fold(csum_sub(csum,
1947                                 csum_partial(t_header - fix, fix, 0)));
1948
1949         else if (fix < 0)
1950                 csum = (u16) ~csum_fold(csum_add(csum,
1951                                 csum_partial(t_header, -fix, 0)));
1952
1953         return swab16(csum);
1954 }
1955
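/**
 * bnx2x_xmit_type - translate skb offload requests into XMIT_* flags.
 *
 * @bp:         driver handle
 * @skb:        packet skb
 *
 * Returns a bitmask describing the required checksum offload
 * (IPv4/IPv6, TCP) and GSO (V4/V6) handling, or XMIT_PLAIN.
 */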
1956 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
1957 {
1958         u32 rc;
1959
1960         if (skb->ip_summed != CHECKSUM_PARTIAL)
1961                 rc = XMIT_PLAIN;
1962
1963         else {
1964                 if (vlan_get_protocol(skb) == htons(ETH_P_IPV6)) {
1965                         rc = XMIT_CSUM_V6;
1966                         if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
1967                                 rc |= XMIT_CSUM_TCP;
1968
1969                 } else {
1970                         rc = XMIT_CSUM_V4;
1971                         if (ip_hdr(skb)->protocol == IPPROTO_TCP)
1972                                 rc |= XMIT_CSUM_TCP;
1973                 }
1974         }
1975
1976         if (skb_is_gso_v6(skb))
1977                 rc |= XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6;
1978         else if (skb_is_gso(skb))
1979                 rc |= XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP;
1980
1981         return rc;
1982 }
1983
1984 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
1985 /* check if the packet requires linearization (packet is too fragmented);
1986    no need to check fragmentation if page size > 8K (there will be no
1987    violation of FW restrictions) */
1988 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
1989                              u32 xmit_type)
1990 {
1991         int to_copy = 0;
1992         int hlen = 0;
1993         int first_bd_sz = 0;
1994
1995         /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
1996         if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
1997
1998                 if (xmit_type & XMIT_GSO) {
1999                         unsigned short lso_mss = skb_shinfo(skb)->gso_size;
2000                         /* Check if LSO packet needs to be copied:
2001                            3 = 1 (for headers BD) + 2 (for PBD and last BD) */
2002                         int wnd_size = MAX_FETCH_BD - 3;
2003                         /* Number of windows to check */
2004                         int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
2005                         int wnd_idx = 0;
2006                         int frag_idx = 0;
2007                         u32 wnd_sum = 0;
2008
2009                         /* Headers length */
2010                         hlen = (int)(skb_transport_header(skb) - skb->data) +
2011                                 tcp_hdrlen(skb);
2012
2013                         /* Amount of data (w/o headers) in the linear part of the SKB */
2014                         first_bd_sz = skb_headlen(skb) - hlen;
2015
2016                         wnd_sum  = first_bd_sz;
2017
2018                         /* Calculate the first sum - it's special */
2019                         for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
2020                                 wnd_sum +=
2021                                         skb_shinfo(skb)->frags[frag_idx].size;
2022
2023                         /* If there is data in the linear part of the skb - check it */
2024                         if (first_bd_sz > 0) {
2025                                 if (unlikely(wnd_sum < lso_mss)) {
2026                                         to_copy = 1;
2027                                         goto exit_lbl;
2028                                 }
2029
2030                                 wnd_sum -= first_bd_sz;
2031                         }
2032
2033                         /* Others are easier: run through the frag list and
2034                            check all windows */
2035                         for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
2036                                 wnd_sum +=
2037                           skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
2038
2039                                 if (unlikely(wnd_sum < lso_mss)) {
2040                                         to_copy = 1;
2041                                         break;
2042                                 }
2043                                 wnd_sum -=
2044                                         skb_shinfo(skb)->frags[wnd_idx].size;
2045                         }
2046                 } else {
2047                         /* in non-LSO too fragmented packet should always
2048                         /* a non-LSO packet that is too fragmented must always
2049                            be linearized */
2050                 }
2051         }
2052
2053 exit_lbl:
2054         if (unlikely(to_copy))
2055                 DP(NETIF_MSG_TX_QUEUED,
2056                    "Linearization IS REQUIRED for %s packet. "
2057                    "num_frags %d  hlen %d  first_bd_sz %d\n",
2058                    (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
2059                    skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
2060
2061         return to_copy;
2062 }
2063 #endif
2064
2065 static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
2066                                         u32 xmit_type)
2067 {
2068         *parsing_data |= (skb_shinfo(skb)->gso_size <<
2069                               ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
2070                               ETH_TX_PARSE_BD_E2_LSO_MSS;
2071         if ((xmit_type & XMIT_GSO_V6) &&
2072             (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
2073                 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
2074 }
2075
2076 /**
2077  * bnx2x_set_pbd_gso - update PBD in GSO case.
2078  *
2079  * @skb:        packet skb
2080  * @pbd:        parse BD
2081  * @xmit_type:  xmit flags
2082  */
2083 static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
2084                                      struct eth_tx_parse_bd_e1x *pbd,
2085                                      u32 xmit_type)
2086 {
2087         pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2088         pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
2089         pbd->tcp_flags = pbd_tcp_flags(skb);
2090
2091         if (xmit_type & XMIT_GSO_V4) {
2092                 pbd->ip_id = swab16(ip_hdr(skb)->id);
2093                 pbd->tcp_pseudo_csum =
2094                         swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
2095                                                   ip_hdr(skb)->daddr,
2096                                                   0, IPPROTO_TCP, 0));
2097
2098         } else
2099                 pbd->tcp_pseudo_csum =
2100                         swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2101                                                 &ipv6_hdr(skb)->daddr,
2102                                                 0, IPPROTO_TCP, 0));
2103
2104         pbd->global_data |= ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN;
2105 }
2106
2107 /**
2108  * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
2109  *
2110  * @bp:                 driver handle
2111  * @skb:                packet skb
2112  * @parsing_data:       data to be updated
2113  * @xmit_type:          xmit flags
2114  *
2115  * 57712 related
2116  */
2117 static inline  u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
2118         u32 *parsing_data, u32 xmit_type)
2119 {
2120         *parsing_data |=
2121                         ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
2122                         ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) &
2123                         ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W;
2124
2125         if (xmit_type & XMIT_CSUM_TCP) {
2126                 *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
2127                         ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
2128                         ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
2129
2130                 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
2131         } else
2132                 /* We support checksum offload for TCP and UDP only.
2133                  * No need to pass the UDP header length - it's a constant.
2134                  */
2135                 return skb_transport_header(skb) +
2136                                 sizeof(struct udphdr) - skb->data;
2137 }
2138
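/**
 * bnx2x_set_sbd_csum - update the start BD with checksum offload flags.
 *
 * @bp:         driver handle
 * @skb:        packet skb
 * @tx_start_bd: start BD to be updated
 * @xmit_type:  xmit flags
 */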
2139 static inline void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
2140         struct eth_tx_start_bd *tx_start_bd, u32 xmit_type)
2141 {
2142
2143         tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
2144
2145         if (xmit_type & XMIT_CSUM_V4)
2146                 tx_start_bd->bd_flags.as_bitfield |=
2147                                         ETH_TX_BD_FLAGS_IP_CSUM;
2148         else
2149                 tx_start_bd->bd_flags.as_bitfield |=
2150                                         ETH_TX_BD_FLAGS_IPV6;
2151
2152         if (!(xmit_type & XMIT_CSUM_TCP))
2153                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
2154
2155 }
2156
2157 /**
2158  * bnx2x_set_pbd_csum - update PBD with checksum and return header length
2159  *
2160  * @bp:         driver handle
2161  * @skb:        packet skb
2162  * @pbd:        parse BD to be updated
2163  * @xmit_type:  xmit flags
2164  */
2165 static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
2166         struct eth_tx_parse_bd_e1x *pbd,
2167         u32 xmit_type)
2168 {
2169         u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
2170
2171         /* for now NS flag is not used in Linux */
2172         pbd->global_data =
2173                 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
2174                          ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
2175
2176         pbd->ip_hlen_w = (skb_transport_header(skb) -
2177                         skb_network_header(skb)) >> 1;
2178
2179         hlen += pbd->ip_hlen_w;
2180
2181         /* We support checksum offload for TCP and UDP only */
2182         if (xmit_type & XMIT_CSUM_TCP)
2183                 hlen += tcp_hdrlen(skb) / 2;
2184         else
2185                 hlen += sizeof(struct udphdr) / 2;
2186
2187         pbd->total_hlen_w = cpu_to_le16(hlen);
2188         hlen = hlen*2;
2189
2190         if (xmit_type & XMIT_CSUM_TCP) {
2191                 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
2192
2193         } else {
2194                 s8 fix = SKB_CS_OFF(skb); /* signed! */
2195
2196                 DP(NETIF_MSG_TX_QUEUED,
2197                    "hlen %d  fix %d  csum before fix %x\n",
2198                    le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
2199
2200                 /* HW bug: fixup the CSUM */
2201                 pbd->tcp_pseudo_csum =
2202                         bnx2x_csum_fix(skb_transport_header(skb),
2203                                        SKB_CS(skb), fix);
2204
2205                 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
2206                    pbd->tcp_pseudo_csum);
2207         }
2208
2209         return hlen;
2210 }
2211
2212 /* called with netif_tx_lock
2213  * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
2214  * netif_wake_queue()
2215  */
2216 netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
2217 {
2218         struct bnx2x *bp = netdev_priv(dev);
2219         struct bnx2x_fastpath *fp;
2220         struct netdev_queue *txq;
2221         struct sw_tx_bd *tx_buf;
2222         struct eth_tx_start_bd *tx_start_bd;
2223         struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
2224         struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
2225         struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
2226         u32 pbd_e2_parsing_data = 0;
2227         u16 pkt_prod, bd_prod;
2228         int nbd, fp_index;
2229         dma_addr_t mapping;
2230         u32 xmit_type = bnx2x_xmit_type(bp, skb);
2231         int i;
2232         u8 hlen = 0;
2233         __le16 pkt_size = 0;
2234         struct ethhdr *eth;
2235         u8 mac_type = UNICAST_ADDRESS;
2236
2237 #ifdef BNX2X_STOP_ON_ERROR
2238         if (unlikely(bp->panic))
2239                 return NETDEV_TX_BUSY;
2240 #endif
2241
2242         fp_index = skb_get_queue_mapping(skb);
2243         txq = netdev_get_tx_queue(dev, fp_index);
2244
2245         fp = &bp->fp[fp_index];
2246
2247         if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
2248                 fp->eth_q_stats.driver_xoff++;
2249                 netif_tx_stop_queue(txq);
2250                 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
2251                 return NETDEV_TX_BUSY;
2252         }
2253
2254         DP(NETIF_MSG_TX_QUEUED, "queue[%d]: SKB: summed %x  protocol %x  "
2255                                 "protocol(%x,%x) gso type %x  xmit_type %x\n",
2256            fp_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
2257            ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
2258
2259         eth = (struct ethhdr *)skb->data;
2260
2261         /* set flag according to packet type (UNICAST_ADDRESS is default) */
2262         if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
2263                 if (is_broadcast_ether_addr(eth->h_dest))
2264                         mac_type = BROADCAST_ADDRESS;
2265                 else
2266                         mac_type = MULTICAST_ADDRESS;
2267         }
2268
2269 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
2270         /* First, check if we need to linearize the skb (due to FW
2271            restrictions). No need to check fragmentation if page size > 8K
2272            (there will be no violation of FW restrictions) */
2273         if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
2274                 /* Statistics of linearization */
2275                 bp->lin_cnt++;
2276                 if (skb_linearize(skb) != 0) {
2277                         DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
2278                            "silently dropping this SKB\n");
2279                         dev_kfree_skb_any(skb);
2280                         return NETDEV_TX_OK;
2281                 }
2282         }
2283 #endif
2284
2285         /*
2286          * Please read carefully. First we use one BD which we mark as start,
2287          * then we have a parsing info BD (used for TSO or xsum),
2288          * and only then we have the rest of the TSO BDs.
2289          * (don't forget to mark the last one as last,
2290          * and to unmap only AFTER you write to the BD ...)
2291          * And above all, all PBD sizes are in words - NOT DWORDS!
2292          */
2293
2294         pkt_prod = fp->tx_pkt_prod++;
2295         bd_prod = TX_BD(fp->tx_bd_prod);
2296
2297         /* get a tx_buf and first BD */
2298         tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
2299         tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
2300
2301         tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
2302         SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_ETH_ADDR_TYPE,
2303                  mac_type);
2304
2305         /* header nbd */
2306         SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1);
2307
2308         /* remember the first BD of the packet */
2309         tx_buf->first_bd = fp->tx_bd_prod;
2310         tx_buf->skb = skb;
2311         tx_buf->flags = 0;
2312
2313         DP(NETIF_MSG_TX_QUEUED,
2314            "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
2315            pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
2316
2317         if (vlan_tx_tag_present(skb)) {
2318                 tx_start_bd->vlan_or_ethertype =
2319                     cpu_to_le16(vlan_tx_tag_get(skb));
2320                 tx_start_bd->bd_flags.as_bitfield |=
2321                     (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
2322         } else
2323                 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
2324
2325         /* turn on parsing and get a BD */
2326         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2327
2328         if (xmit_type & XMIT_CSUM)
2329                 bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
2330
2331         if (CHIP_IS_E2(bp)) {
2332                 pbd_e2 = &fp->tx_desc_ring[bd_prod].parse_bd_e2;
2333                 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
2334                 /* Set PBD in checksum offload case */
2335                 if (xmit_type & XMIT_CSUM)
2336                         hlen = bnx2x_set_pbd_csum_e2(bp, skb,
2337                                                      &pbd_e2_parsing_data,
2338                                                      xmit_type);
2339         } else {
2340                 pbd_e1x = &fp->tx_desc_ring[bd_prod].parse_bd_e1x;
2341                 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
2342                 /* Set PBD in checksum offload case */
2343                 if (xmit_type & XMIT_CSUM)
2344                         hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
2345
2346         }
2347
2348         /* Map skb linear data for DMA */
2349         mapping = dma_map_single(&bp->pdev->dev, skb->data,
2350                                  skb_headlen(skb), DMA_TO_DEVICE);
2351
2352         /* Setup the data pointer of the first BD of the packet */
2353         tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2354         tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
2355         nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
2356         tx_start_bd->nbd = cpu_to_le16(nbd);
2357         tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
2358         pkt_size = tx_start_bd->nbytes;
2359
2360         DP(NETIF_MSG_TX_QUEUED, "first bd @%p  addr (%x:%x)  nbd %d"
2361            "  nbytes %d  flags %x  vlan %x\n",
2362            tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
2363            le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
2364            tx_start_bd->bd_flags.as_bitfield,
2365            le16_to_cpu(tx_start_bd->vlan_or_ethertype));
2366
2367         if (xmit_type & XMIT_GSO) {
2368
2369                 DP(NETIF_MSG_TX_QUEUED,
2370                    "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
2371                    skb->len, hlen, skb_headlen(skb),
2372                    skb_shinfo(skb)->gso_size);
2373
2374                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
2375
2376                 if (unlikely(skb_headlen(skb) > hlen))
2377                         bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
2378                                                  hlen, bd_prod, ++nbd);
2379                 if (CHIP_IS_E2(bp))
2380                         bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
2381                                              xmit_type);
2382                 else
2383                         bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
2384         }
2385
2386         /* Set the PBD's parsing_data field if not zero
2387          * (for the chips newer than 57711).
2388          */
2389         if (pbd_e2_parsing_data)
2390                 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
2391
2392         tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
2393
2394         /* Handle fragmented skb */
2395         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2396                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2397
2398                 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2399                 tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
2400                 if (total_pkt_bd == NULL)
2401                         total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
2402
2403                 mapping = dma_map_page(&bp->pdev->dev, frag->page,
2404                                        frag->page_offset,
2405                                        frag->size, DMA_TO_DEVICE);
2406
2407                 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2408                 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
2409                 tx_data_bd->nbytes = cpu_to_le16(frag->size);
2410                 le16_add_cpu(&pkt_size, frag->size);
2411
2412                 DP(NETIF_MSG_TX_QUEUED,
2413                    "frag %d  bd @%p  addr (%x:%x)  nbytes %d\n",
2414                    i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
2415                    le16_to_cpu(tx_data_bd->nbytes));
2416         }
2417
2418         DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
2419
2420         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2421
2422         /* now send a tx doorbell, counting the next BD
2423          * if the packet contains or ends with it
2424          */
2425         if (TX_BD_POFF(bd_prod) < nbd)
2426                 nbd++;
2427
2428         if (total_pkt_bd != NULL)
2429                 total_pkt_bd->total_pkt_bytes = pkt_size;
2430
2431         if (pbd_e1x)
2432                 DP(NETIF_MSG_TX_QUEUED,
2433                    "PBD (E1X) @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u"
2434                    "  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
2435                    pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
2436                    pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
2437                    pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
2438                     le16_to_cpu(pbd_e1x->total_hlen_w));
2439         if (pbd_e2)
2440                 DP(NETIF_MSG_TX_QUEUED,
2441                    "PBD (E2) @%p  dst %x %x %x src %x %x %x parsing_data %x\n",
2442                    pbd_e2, pbd_e2->dst_mac_addr_hi, pbd_e2->dst_mac_addr_mid,
2443                    pbd_e2->dst_mac_addr_lo, pbd_e2->src_mac_addr_hi,
2444                    pbd_e2->src_mac_addr_mid, pbd_e2->src_mac_addr_lo,
2445                    pbd_e2->parsing_data);
2446         DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);
2447
2448         /*
2449          * Make sure that the BD data is updated before updating the producer
2450          * since FW might read the BD right after the producer is updated.
2451          * This is only applicable for weak-ordered memory model archs such
2452          * as IA-64. The following barrier is also mandatory since the FW
2453          * assumes packets always have BDs.
2454          */
2455         wmb();
2456
2457         fp->tx_db.data.prod += nbd;
2458         barrier();
2459
2460         DOORBELL(bp, fp->cid, fp->tx_db.raw);
2461
2462         mmiowb();
2463
2464         fp->tx_bd_prod += nbd;
2465
2466         if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
2467                 netif_tx_stop_queue(txq);
2468
2469                 /* the paired memory barrier is in bnx2x_tx_int(); we have to keep
2470                  * the ordering of set_bit() in netif_tx_stop_queue() and the read
2471                  * of fp->tx_bd_cons */
2472                 smp_mb();
2473
2474                 fp->eth_q_stats.driver_xoff++;
2475                 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
2476                         netif_tx_wake_queue(txq);
2477         }
2478         fp->tx_pkt++;
2479
2480         return NETDEV_TX_OK;
2481 }
2482
2483 /* called with rtnl_lock */
2484 int bnx2x_change_mac_addr(struct net_device *dev, void *p)
2485 {
2486         struct sockaddr *addr = p;
2487         struct bnx2x *bp = netdev_priv(dev);
2488
2489         if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
2490                 return -EINVAL;
2491
2492         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2493         if (netif_running(dev))
2494                 bnx2x_set_eth_mac(bp, 1);
2495
2496         return 0;
2497 }
2498
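/* free the status block and the Rx/Tx rings of a single fastpath */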
2499 static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
2500 {
2501         union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
2502         struct bnx2x_fastpath *fp = &bp->fp[fp_index];
2503
2504         /* Common */
2505 #ifdef BCM_CNIC
2506         if (IS_FCOE_IDX(fp_index)) {
2507                 memset(sb, 0, sizeof(union host_hc_status_block));
2508                 fp->status_blk_mapping = 0;
2509
2510         } else {
2511 #endif
2512                 /* status blocks */
2513                 if (CHIP_IS_E2(bp))
2514                         BNX2X_PCI_FREE(sb->e2_sb,
2515                                        bnx2x_fp(bp, fp_index,
2516                                                 status_blk_mapping),
2517                                        sizeof(struct host_hc_status_block_e2));
2518                 else
2519                         BNX2X_PCI_FREE(sb->e1x_sb,
2520                                        bnx2x_fp(bp, fp_index,
2521                                                 status_blk_mapping),
2522                                        sizeof(struct host_hc_status_block_e1x));
2523 #ifdef BCM_CNIC
2524         }
2525 #endif
2526         /* Rx */
2527         if (!skip_rx_queue(bp, fp_index)) {
2528                 bnx2x_free_rx_bds(fp);
2529
2530                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
2531                 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
2532                 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
2533                                bnx2x_fp(bp, fp_index, rx_desc_mapping),
2534                                sizeof(struct eth_rx_bd) * NUM_RX_BD);
2535
2536                 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
2537                                bnx2x_fp(bp, fp_index, rx_comp_mapping),
2538                                sizeof(struct eth_fast_path_rx_cqe) *
2539                                NUM_RCQ_BD);
2540
2541                 /* SGE ring */
2542                 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
2543                 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
2544                                bnx2x_fp(bp, fp_index, rx_sge_mapping),
2545                                BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
2546         }
2547
2548         /* Tx */
2549         if (!skip_tx_queue(bp, fp_index)) {
2550                 /* fastpath tx rings: tx_buf tx_desc */
2551                 BNX2X_FREE(bnx2x_fp(bp, fp_index, tx_buf_ring));
2552                 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, tx_desc_ring),
2553                                bnx2x_fp(bp, fp_index, tx_desc_mapping),
2554                                sizeof(union eth_tx_bd_types) * NUM_TX_BD);
2555         }
2556         /* end of fastpath */
2557 }
2558
2559 void bnx2x_free_fp_mem(struct bnx2x *bp)
2560 {
2561         int i;
2562         for_each_queue(bp, i)
2563                 bnx2x_free_fp_mem_at(bp, i);
2564 }
2565
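/* cache the status block index arrays (E1x or E2 layout) in the fastpath */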
2566 static inline void set_sb_shortcuts(struct bnx2x *bp, int index)
2567 {
2568         union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
2569         if (CHIP_IS_E2(bp)) {
2570                 bnx2x_fp(bp, index, sb_index_values) =
2571                         (__le16 *)status_blk.e2_sb->sb.index_values;
2572                 bnx2x_fp(bp, index, sb_running_index) =
2573                         (__le16 *)status_blk.e2_sb->sb.running_index;
2574         } else {
2575                 bnx2x_fp(bp, index, sb_index_values) =
2576                         (__le16 *)status_blk.e1x_sb->sb.index_values;
2577                 bnx2x_fp(bp, index, sb_running_index) =
2578                         (__le16 *)status_blk.e1x_sb->sb.running_index;
2579         }
2580 }
2581
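/* allocate the status block and the Rx/Tx rings of a single fastpath;
 * on low memory the Rx ring may be reduced or the queue released entirely
 */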
2582 static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
2583 {
2584         union host_hc_status_block *sb;
2585         struct bnx2x_fastpath *fp = &bp->fp[index];
2586         int ring_size = 0;
2587
2588         /* if rx_ring_size specified - use it */
2589         int rx_ring_size = bp->rx_ring_size ? bp->rx_ring_size :
2590                            MAX_RX_AVAIL/bp->num_queues;
2591
2592         /* allocate at least the number of buffers required by the FW */
2593         rx_ring_size = max_t(int, fp->disable_tpa ? MIN_RX_SIZE_NONTPA :
2594                                                     MIN_RX_SIZE_TPA,
2595                                   rx_ring_size);
2596
2597         bnx2x_fp(bp, index, bp) = bp;
2598         bnx2x_fp(bp, index, index) = index;
2599
2600         /* Common */
2601         sb = &bnx2x_fp(bp, index, status_blk);
2602 #ifdef BCM_CNIC
2603         if (!IS_FCOE_IDX(index)) {
2604 #endif
2605                 /* status blocks */
2606                 if (CHIP_IS_E2(bp))
2607                         BNX2X_PCI_ALLOC(sb->e2_sb,
2608                                 &bnx2x_fp(bp, index, status_blk_mapping),
2609                                 sizeof(struct host_hc_status_block_e2));
2610                 else
2611                         BNX2X_PCI_ALLOC(sb->e1x_sb,
2612                                 &bnx2x_fp(bp, index, status_blk_mapping),
2613                             sizeof(struct host_hc_status_block_e1x));
2614 #ifdef BCM_CNIC
2615         }
2616 #endif
2617
2618         /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
2619          * set shortcuts for it.
2620          */
2621         if (!IS_FCOE_IDX(index))
2622                 set_sb_shortcuts(bp, index);
2623
2624         /* Tx */
2625         if (!skip_tx_queue(bp, index)) {
2626                 /* fastpath tx rings: tx_buf tx_desc */
2627                 BNX2X_ALLOC(bnx2x_fp(bp, index, tx_buf_ring),
2628                                 sizeof(struct sw_tx_bd) * NUM_TX_BD);
2629                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, tx_desc_ring),
2630                                 &bnx2x_fp(bp, index, tx_desc_mapping),
2631                                 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
2632         }
2633
2634         /* Rx */
2635         if (!skip_rx_queue(bp, index)) {
2636                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
2637                 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_buf_ring),
2638                                 sizeof(struct sw_rx_bd) * NUM_RX_BD);
2639                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_desc_ring),
2640                                 &bnx2x_fp(bp, index, rx_desc_mapping),
2641                                 sizeof(struct eth_rx_bd) * NUM_RX_BD);
2642
2643                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_comp_ring),
2644                                 &bnx2x_fp(bp, index, rx_comp_mapping),
2645                                 sizeof(struct eth_fast_path_rx_cqe) *
2646                                 NUM_RCQ_BD);
2647
2648                 /* SGE ring */
2649                 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring),
2650                                 sizeof(struct sw_rx_page) * NUM_RX_SGE);
2651                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_sge_ring),
2652                                 &bnx2x_fp(bp, index, rx_sge_mapping),
2653                                 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
2654                 /* RX BD ring */
2655                 bnx2x_set_next_page_rx_bd(fp);
2656
2657                 /* CQ ring */
2658                 bnx2x_set_next_page_rx_cq(fp);
2659
2660                 /* BDs */
2661                 ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
2662                 if (ring_size < rx_ring_size)
2663                         goto alloc_mem_err;
2664         }
2665
2666         return 0;
2667
2668 /* handles low memory cases */
2669 alloc_mem_err:
2670         BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
2671                                                 index, ring_size);
2672         /* FW will drop all packets if the queue is not big enough.
2673          * In these cases we disable the queue.
2674          * The minimum size is different for TPA and non-TPA queues.
2675          */
2676         if (ring_size < (fp->disable_tpa ?
2677                                 MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
2678                         /* release memory allocated for this queue */
2679                         bnx2x_free_fp_mem_at(bp, index);
2680                         return -ENOMEM;
2681         }
2682         return 0;
2683 }
2684
2685 int bnx2x_alloc_fp_mem(struct bnx2x *bp)
2686 {
2687         int i;
2688
2689         /*
2690          * 1. Allocate FP for leading - fatal if error
2691          * 2. {CNIC} Allocate FCoE FP - fatal if error
2692          * 3. Allocate RSS - fix number of queues if error
2693          */
2694
2695         /* leading */
2696         if (bnx2x_alloc_fp_mem_at(bp, 0))
2697                 return -ENOMEM;
2698 #ifdef BCM_CNIC
2699         if (!NO_FCOE(bp))
2700                 /* FCoE */
2701                 if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX))
2702                         /* we will fail the load process instead of marking
2703                          * NO_FCOE_FLAG
2704                          */
2705                         return -ENOMEM;
2706 #endif
2707         /* RSS */
2708         for_each_nondefault_eth_queue(bp, i)
2709                 if (bnx2x_alloc_fp_mem_at(bp, i))
2710                         break;
2711
2712         /* handle memory failures */
2713         if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
2714                 int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
2715
2716                 WARN_ON(delta < 0);
2717 #ifdef BCM_CNIC
2718                 /*
2719                  * move the non-eth FPs next to the last eth FP;
2720                  * must be done in that order:
2721                  * FCOE_IDX < FWD_IDX < OOO_IDX
2722                  */
2723
2724                 /* move FCoE fp */
2725                 bnx2x_move_fp(bp, FCOE_IDX, FCOE_IDX - delta);
2726 #endif
2727                 bp->num_queues -= delta;
2728                 BNX2X_ERR("Adjusted num of queues from %d to %d\n",
2729                           bp->num_queues + delta, bp->num_queues);
2730         }
2731
2732         return 0;
2733 }
2734
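/* request MSI-X vectors or a single MSI/INTx interrupt according to bp->flags */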
2735 static int bnx2x_setup_irqs(struct bnx2x *bp)
2736 {
2737         int rc = 0;
2738         if (bp->flags & USING_MSIX_FLAG) {
2739                 rc = bnx2x_req_msix_irqs(bp);
2740                 if (rc)
2741                         return rc;
2742         } else {
2743                 bnx2x_ack_int(bp);
2744                 rc = bnx2x_req_irq(bp);
2745                 if (rc) {
2746                         BNX2X_ERR("IRQ request failed  rc %d, aborting\n", rc);
2747                         return rc;
2748                 }
2749                 if (bp->flags & USING_MSI_FLAG) {
2750                         bp->dev->irq = bp->pdev->irq;
2751                         netdev_info(bp->dev, "using MSI  IRQ %d\n",
2752                                bp->pdev->irq);
2753                 }
2754         }
2755
2756         return 0;
2757 }
2758
2759 void bnx2x_free_mem_bp(struct bnx2x *bp)
2760 {
2761         kfree(bp->fp);
2762         kfree(bp->msix_table);
2763         kfree(bp->ilt);
2764 }
2765
2766 int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
2767 {
2768         struct bnx2x_fastpath *fp;
2769         struct msix_entry *tbl;
2770         struct bnx2x_ilt *ilt;
2771
2772         /* fp array */
2773         fp = kzalloc(L2_FP_COUNT(bp->l2_cid_count)*sizeof(*fp), GFP_KERNEL);
2774         if (!fp)
2775                 goto alloc_err;
2776         bp->fp = fp;
2777
2778         /* msix table */
2779         tbl = kzalloc((FP_SB_COUNT(bp->l2_cid_count) + 1) * sizeof(*tbl),
2780                                   GFP_KERNEL);
2781         if (!tbl)
2782                 goto alloc_err;
2783         bp->msix_table = tbl;
2784
2785         /* ilt */
2786         ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
2787         if (!ilt)
2788                 goto alloc_err;
2789         bp->ilt = ilt;
2790
2791         return 0;
2792 alloc_err:
2793         bnx2x_free_mem_bp(bp);
2794         return -ENOMEM;
2795
2796 }
2797
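/* unload and reload the NIC if the interface is currently up */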
2798 static int bnx2x_reload_if_running(struct net_device *dev)
2799 {
2800         struct bnx2x *bp = netdev_priv(dev);
2801
2802         if (unlikely(!netif_running(dev)))
2803                 return 0;
2804
2805         bnx2x_nic_unload(bp, UNLOAD_NORMAL);
2806         return bnx2x_nic_load(bp, LOAD_NORMAL);
2807 }
2808
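/* return the index of the currently used PHY (internal, EXT_PHY1 or EXT_PHY2) */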
2809 int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
2810 {
2811         u32 sel_phy_idx = 0;
2812         if (bp->link_params.num_phys <= 1)
2813                 return INT_PHY;
2814
2815         if (bp->link_vars.link_up) {
2816                 sel_phy_idx = EXT_PHY1;
2817                 /* In case link is SERDES, check if the EXT_PHY2 is the one */
2818                 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
2819                     (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
2820                         sel_phy_idx = EXT_PHY2;
2821         } else {
2822
2823                 switch (bnx2x_phy_selection(&bp->link_params)) {
2824                 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
2825                 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
2826                 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
2827                        sel_phy_idx = EXT_PHY1;
2828                        break;
2829                 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
2830                 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
2831                        sel_phy_idx = EXT_PHY2;
2832                        break;
2833                 }
2834         }
2835
2836         return sel_phy_idx;
2837 }
2838
2839 int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
2840 {
2841         u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
2842         /*
2843          * The selected active PHY is always the one after swapping (in case
2844          * PHY swapping is enabled), so when swapping is enabled we need to
2845          * reverse the configuration.
2846          */
2847
2848         if (bp->link_params.multi_phy_config &
2849             PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
2850                 if (sel_phy_idx == EXT_PHY1)
2851                         sel_phy_idx = EXT_PHY2;
2852                 else if (sel_phy_idx == EXT_PHY2)
2853                         sel_phy_idx = EXT_PHY1;
2854         }
2855         return LINK_CONFIG_IDX(sel_phy_idx);
2856 }
2857
2858 /* called with rtnl_lock */
2859 int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
2860 {
2861         struct bnx2x *bp = netdev_priv(dev);
2862
2863         if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
2864                 netdev_err(dev, "Handling parity error recovery. Try again later\n");
2865                 return -EAGAIN;
2866         }
2867
2868         if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
2869             ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
2870                 return -EINVAL;
2871
2872         /* This does not race with packet allocation
2873          * because the actual alloc size is
2874          * only updated as part of load
2875          */
2876         dev->mtu = new_mtu;
2877
2878         return bnx2x_reload_if_running(dev);
2879 }
2880
2881 u32 bnx2x_fix_features(struct net_device *dev, u32 features)
2882 {
2883         struct bnx2x *bp = netdev_priv(dev);
2884
2885         /* TPA requires Rx CSUM offloading */
2886         if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa)
2887                 features &= ~NETIF_F_LRO;
2888
2889         return features;
2890 }
2891
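/*
 * Apply requested netdev feature changes: NETIF_F_LRO toggles the
 * TPA_ENABLE_FLAG and NETIF_F_LOOPBACK selects BMAC loopback mode. The NIC
 * is reloaded when either setting actually changes (deferred until the end
 * of parity error recovery when recovery is in progress).
 */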
2892 int bnx2x_set_features(struct net_device *dev, u32 features)
2893 {
2894         struct bnx2x *bp = netdev_priv(dev);
2895         u32 flags = bp->flags;
2896         bool bnx2x_reload = false;
2897
2898         if (features & NETIF_F_LRO)
2899                 flags |= TPA_ENABLE_FLAG;
2900         else
2901                 flags &= ~TPA_ENABLE_FLAG;
2902
2903         if (features & NETIF_F_LOOPBACK) {
2904                 if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
2905                         bp->link_params.loopback_mode = LOOPBACK_BMAC;
2906                         bnx2x_reload = true;
2907                 }
2908         } else {
2909                 if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
2910                         bp->link_params.loopback_mode = LOOPBACK_NONE;
2911                         bnx2x_reload = true;
2912                 }
2913         }
2914
2915         if (flags ^ bp->flags) {
2916                 bp->flags = flags;
2917                 bnx2x_reload = true;
2918         }
2919
2920         if (bnx2x_reload) {
2921                 if (bp->recovery_state == BNX2X_RECOVERY_DONE)
2922                         return bnx2x_reload_if_running(dev);
2923                 /* else: bnx2x_nic_load() will be called at end of recovery */
2924         }
2925
2926         return 0;
2927 }
2928
2929 void bnx2x_tx_timeout(struct net_device *dev)
2930 {
2931         struct bnx2x *bp = netdev_priv(dev);
2932
2933 #ifdef BNX2X_STOP_ON_ERROR
2934         if (!bp->panic)
2935                 bnx2x_panic();
2936 #endif
2937         /* This allows the netif to be shut down gracefully before resetting */
2938         schedule_delayed_work(&bp->reset_task, 0);
2939 }
2940
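/*
 * PCI suspend handler: save PCI state, detach the net device, unload the
 * NIC with UNLOAD_CLOSE and enter the power state chosen by the PCI core.
 */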
2941 int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
2942 {
2943         struct net_device *dev = pci_get_drvdata(pdev);
2944         struct bnx2x *bp;
2945
2946         if (!dev) {
2947                 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
2948                 return -ENODEV;
2949         }
2950         bp = netdev_priv(dev);
2951
2952         rtnl_lock();
2953
2954         pci_save_state(pdev);
2955
2956         if (!netif_running(dev)) {
2957                 rtnl_unlock();
2958                 return 0;
2959         }
2960
2961         netif_device_detach(dev);
2962
2963         bnx2x_nic_unload(bp, UNLOAD_CLOSE);
2964
2965         bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
2966
2967         rtnl_unlock();
2968
2969         return 0;
2970 }
2971
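/*
 * PCI resume handler: restore PCI state, bring the device back to D0,
 * reattach the net device and reload the NIC. Refused with -EAGAIN while
 * parity error recovery is in progress.
 */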
2972 int bnx2x_resume(struct pci_dev *pdev)
2973 {
2974         struct net_device *dev = pci_get_drvdata(pdev);
2975         struct bnx2x *bp;
2976         int rc;
2977
2978         if (!dev) {
2979                 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
2980                 return -ENODEV;
2981         }
2982         bp = netdev_priv(dev);
2983
2984         if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
2985                 netdev_err(dev, "Handling parity error recovery. Try again later\n");
2986                 return -EAGAIN;
2987         }
2988
2989         rtnl_lock();
2990
2991         pci_restore_state(pdev);
2992
2993         if (!netif_running(dev)) {
2994                 rtnl_unlock();
2995                 return 0;
2996         }
2997
2998         bnx2x_set_power_state(bp, PCI_D0);
2999         netif_device_attach(dev);
3000
3001         /* Since the chip was reset, clear the FW sequence number */
3002         bp->fw_seq = 0;
3003         rc = bnx2x_nic_load(bp, LOAD_OPEN);
3004
3005         rtnl_unlock();
3006
3007         return rc;
3008 }