1 /* bnx2x_cmn.c: Broadcom Everest network driver.
2  *
3  * Copyright (c) 2007-2011 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10  * Written by: Eliezer Tamir
11  * Based on code from Michael Chan's bnx2 driver
12  * UDP CSUM errata workaround by Arik Gendelman
13  * Slowpath and fastpath rework by Vladislav Zolotarov
14  * Statistics and Link management by Yitchak Gertner
15  *
16  */
17
18 #include <linux/etherdevice.h>
19 #include <linux/if_vlan.h>
20 #include <linux/interrupt.h>
21 #include <linux/ip.h>
22 #include <net/ipv6.h>
23 #include <net/ip6_checksum.h>
24 #include <linux/firmware.h>
25 #include <linux/prefetch.h>
26 #include "bnx2x_cmn.h"
27 #include "bnx2x_init.h"
28 #include "bnx2x_sp.h"
29
30
31
32 /**
33  * bnx2x_bz_fp - zero content of the fastpath structure.
34  *
35  * @bp:         driver handle
36  * @index:      fastpath index to be zeroed
37  *
38  * Makes sure the contents of bp->fp[index].napi are kept
39  * intact.
40  */
41 static inline void bnx2x_bz_fp(struct bnx2x *bp, int index)
42 {
43         struct bnx2x_fastpath *fp = &bp->fp[index];
44         struct napi_struct orig_napi = fp->napi;
45         /* bzero bnx2x_fastpath contents */
46         memset(fp, 0, sizeof(*fp));
47
48         /* Restore the NAPI object as it has been already initialized */
49         fp->napi = orig_napi;
50 }
51
52 /**
53  * bnx2x_move_fp - move content of the fastpath structure.
54  *
55  * @bp:         driver handle
56  * @from:       source FP index
57  * @to:         destination FP index
58  *
59  * Makes sure the contents of bp->fp[to].napi are kept
60  * intact.
61  */
62 static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
63 {
64         struct bnx2x_fastpath *from_fp = &bp->fp[from];
65         struct bnx2x_fastpath *to_fp = &bp->fp[to];
66         struct napi_struct orig_napi = to_fp->napi;
67         /* Move bnx2x_fastpath contents */
68         memcpy(to_fp, from_fp, sizeof(*to_fp));
69         to_fp->index = to;
70
71         /* Restore the NAPI object as it has been already initialized */
72         to_fp->napi = orig_napi;
73 }
74
75 int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
76
77 /* free skb in the packet ring at pos idx
78  * return idx of last bd freed
79  */
80 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
81                              u16 idx)
82 {
83         struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
84         struct eth_tx_start_bd *tx_start_bd;
85         struct eth_tx_bd *tx_data_bd;
86         struct sk_buff *skb = tx_buf->skb;
87         u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
88         int nbd;
89
90         /* prefetch skb end pointer to speedup dev_kfree_skb() */
91         prefetch(&skb->end);
92
93         DP(BNX2X_MSG_FP, "fp[%d]: pkt_idx %d  buff @(%p)->skb %p\n",
94            fp->index, idx, tx_buf, skb);
95
96         /* unmap first bd */
97         DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
98         tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
99         dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
100                          BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);
101
102
103         nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
104 #ifdef BNX2X_STOP_ON_ERROR
105         if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
106                 BNX2X_ERR("BAD nbd!\n");
107                 bnx2x_panic();
108         }
109 #endif
110         new_cons = nbd + tx_buf->first_bd;
111
112         /* Get the next bd */
113         bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
114
115         /* Skip a parse bd... */
116         --nbd;
117         bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
118
119         /* ...and the TSO split header bd since they have no mapping */
120         if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
121                 --nbd;
122                 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
123         }
124
125         /* now free frags */
126         while (nbd > 0) {
127
128                 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
129                 tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
130                 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
131                                BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
132                 if (--nbd)
133                         bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
134         }
135
136         /* release skb */
137         WARN_ON(!skb);
138         dev_kfree_skb_any(skb);
139         tx_buf->first_bd = 0;
140         tx_buf->skb = NULL;
141
142         return new_cons;
143 }
144
145 int bnx2x_tx_int(struct bnx2x_fastpath *fp)
146 {
147         struct bnx2x *bp = fp->bp;
148         struct netdev_queue *txq;
149         u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
150
151 #ifdef BNX2X_STOP_ON_ERROR
152         if (unlikely(bp->panic))
153                 return -1;
154 #endif
155
156         txq = netdev_get_tx_queue(bp->dev, fp->index);
157         hw_cons = le16_to_cpu(*fp->tx_cons_sb);
158         sw_cons = fp->tx_pkt_cons;
159
160         while (sw_cons != hw_cons) {
161                 u16 pkt_cons;
162
163                 pkt_cons = TX_BD(sw_cons);
164
165                 DP(NETIF_MSG_TX_DONE, "queue[%d]: hw_cons %u  sw_cons %u "
166                                       " pkt_cons %u\n",
167                    fp->index, hw_cons, sw_cons, pkt_cons);
168
169                 bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
170                 sw_cons++;
171         }
172
173         fp->tx_pkt_cons = sw_cons;
174         fp->tx_bd_cons = bd_cons;
175
176         /* Need to make the tx_bd_cons update visible to start_xmit()
177          * before checking for netif_tx_queue_stopped().  Without the
178          * memory barrier, there is a small possibility that
179          * start_xmit() will miss it and cause the queue to be stopped
180          * forever.
181          * On the other hand we need an rmb() here to ensure the proper
182          * ordering of bit testing in the following
183          * netif_tx_queue_stopped(txq) call.
184          */
185         smp_mb();
186
187         if (unlikely(netif_tx_queue_stopped(txq))) {
188                 /* Taking tx_lock() is needed to prevent reenabling the queue
189                  * while it's empty. This could have happened if rx_action() gets
190                  * suspended in bnx2x_tx_int() after the condition before
191                  * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
192                  *
193                  * stops the queue->sees fresh tx_bd_cons->releases the queue->
194                  * sends some packets consuming the whole queue again->
195                  * stops the queue
196                  */
197
198                 __netif_tx_lock(txq, smp_processor_id());
199
200                 if ((netif_tx_queue_stopped(txq)) &&
201                     (bp->state == BNX2X_STATE_OPEN) &&
202                     (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
203                         netif_tx_wake_queue(txq);
204
205                 __netif_tx_unlock(txq);
206         }
207         return 0;
208 }
209
210 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
211                                              u16 idx)
212 {
213         u16 last_max = fp->last_max_sge;
214
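        /* Signed 16-bit compare (as SUB_S16 likely provides) so the
         * "newer index" test below stays correct across SGE index wrap-around.
         */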
215         if (SUB_S16(idx, last_max) > 0)
216                 fp->last_max_sge = idx;
217 }
218
219 static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
220                                   struct eth_fast_path_rx_cqe *fp_cqe)
221 {
222         struct bnx2x *bp = fp->bp;
223         u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
224                                      le16_to_cpu(fp_cqe->len_on_bd)) >>
225                       SGE_PAGE_SHIFT;
226         u16 last_max, last_elem, first_elem;
227         u16 delta = 0;
228         u16 i;
229
230         if (!sge_len)
231                 return;
232
233         /* First mark all used pages */
234         for (i = 0; i < sge_len; i++)
235                 BIT_VEC64_CLEAR_BIT(fp->sge_mask,
236                         RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[i])));
237
238         DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
239            sge_len - 1, le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));
240
241         /* Here we assume that the last SGE index is the biggest */
242         prefetch((void *)(fp->sge_mask));
243         bnx2x_update_last_max_sge(fp,
244                 le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));
245
246         last_max = RX_SGE(fp->last_max_sge);
247         last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
248         first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
249
250         /* If ring is not full */
251         if (last_elem + 1 != first_elem)
252                 last_elem++;
253
254         /* Now update the prod */
255         for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
256                 if (likely(fp->sge_mask[i]))
257                         break;
258
259                 fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
260                 delta += BIT_VEC64_ELEM_SZ;
261         }
262
263         if (delta > 0) {
264                 fp->rx_sge_prod += delta;
265                 /* clear page-end entries */
266                 bnx2x_clear_sge_mask_next_elems(fp);
267         }
268
269         DP(NETIF_MSG_RX_STATUS,
270            "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
271            fp->last_max_sge, fp->rx_sge_prod);
272 }
273
274 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
275                             struct sk_buff *skb, u16 cons, u16 prod,
276                             struct eth_fast_path_rx_cqe *cqe)
277 {
278         struct bnx2x *bp = fp->bp;
279         struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
280         struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
281         struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
282         dma_addr_t mapping;
283         struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
284         struct sw_rx_bd *first_buf = &tpa_info->first_buf;
285
286         /* print error if current state != stop */
287         if (tpa_info->tpa_state != BNX2X_TPA_STOP)
288                 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
289
290         /* Try to map an empty skb from the aggregation info  */
291         mapping = dma_map_single(&bp->pdev->dev,
292                                  first_buf->skb->data,
293                                  fp->rx_buf_size, DMA_FROM_DEVICE);
294         /*
295          *  ...if it fails - move the skb from the consumer to the producer
296          *  and set the current aggregation state as ERROR to drop it
297          *  when TPA_STOP arrives.
298          */
299
300         if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
301                 /* Move the BD from the consumer to the producer */
302                 bnx2x_reuse_rx_skb(fp, cons, prod);
303                 tpa_info->tpa_state = BNX2X_TPA_ERROR;
304                 return;
305         }
306
307         /* move empty skb from pool to prod */
308         prod_rx_buf->skb = first_buf->skb;
309         dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
310         /* point prod_bd to new skb */
311         prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
312         prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
313
314         /* move partial skb from cons to pool (don't unmap yet) */
315         *first_buf = *cons_rx_buf;
316
317         /* mark bin state as START */
318         tpa_info->parsing_flags =
319                 le16_to_cpu(cqe->pars_flags.flags);
320         tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
321         tpa_info->tpa_state = BNX2X_TPA_START;
322         tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
323         tpa_info->placement_offset = cqe->placement_offset;
324
325 #ifdef BNX2X_STOP_ON_ERROR
326         fp->tpa_queue_used |= (1 << queue);
327 #ifdef _ASM_GENERIC_INT_L64_H
328         DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
329 #else
330         DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
331 #endif
332            fp->tpa_queue_used);
333 #endif
334 }
335
336 /* Timestamp option length allowed for TPA aggregation:
337  *
338  *              nop nop kind length echo val
339  */
340 #define TPA_TSTAMP_OPT_LEN      12
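/* i.e. nop (1) + nop (1) + kind (1) + length (1) + echo (4) + val (4) = 12 bytes */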
341 /**
342  * bnx2x_set_lro_mss - calculate the approximate value of the MSS
343  *
344  * @bp:                 driver handle
345  * @parsing_flags:      parsing flags from the START CQE
346  * @len_on_bd:          total length of the first packet for the
347  *                      aggregation.
348  *
349  * Approximate value of the MSS for this aggregation, calculated using
350  * its first packet.
351  */
352 static inline u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags,
353                                     u16 len_on_bd)
354 {
355         /*
356          * A TPA aggregation won't have IP options, TCP options other
357          * than the timestamp, or IPv6 extension headers.
358          */
359         u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);
360
361         if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
362             PRS_FLAG_OVERETH_IPV6)
363                 hdrs_len += sizeof(struct ipv6hdr);
364         else /* IPv4 */
365                 hdrs_len += sizeof(struct iphdr);
366
367
368         /* Check if there was a TCP timestamp; if there was, it will
369          * always be 12 bytes long: nop nop kind length echo val.
370          *
371          * Otherwise FW would close the aggregation.
372          */
373         if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
374                 hdrs_len += TPA_TSTAMP_OPT_LEN;
375
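        /* Illustrative example (values assumed, not taken from HW): an IPv4
         * aggregation with timestamps and len_on_bd == 1514 gives
         * hdrs_len = 14 + 20 + 20 + 12 = 66, i.e. an approximate MSS of 1448.
         */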
376         return len_on_bd - hdrs_len;
377 }
378
379 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
380                                u16 queue, struct sk_buff *skb,
381                                struct eth_end_agg_rx_cqe *cqe,
382                                u16 cqe_idx)
383 {
384         struct sw_rx_page *rx_pg, old_rx_pg;
385         u32 i, frag_len, frag_size, pages;
386         int err;
387         int j;
388         struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
389         u16 len_on_bd = tpa_info->len_on_bd;
390
391         frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;
392         pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
393
394         /* This is needed in order to enable forwarding support */
395         if (frag_size)
396                 skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp,
397                                         tpa_info->parsing_flags, len_on_bd);
398
399 #ifdef BNX2X_STOP_ON_ERROR
400         if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
401                 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
402                           pages, cqe_idx);
403                 BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
404                 bnx2x_panic();
405                 return -EINVAL;
406         }
407 #endif
408
409         /* Run through the SGL and compose the fragmented skb */
410         for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
411                 u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));
412
413                 /* FW gives the indices of the SGE as if the ring is an array
414                    (meaning that "next" element will consume 2 indices) */
415                 frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
416                 rx_pg = &fp->rx_page_ring[sge_idx];
417                 old_rx_pg = *rx_pg;
418
419                 /* If we fail to allocate a substitute page, we simply stop
420                    where we are and drop the whole packet */
421                 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
422                 if (unlikely(err)) {
423                         fp->eth_q_stats.rx_skb_alloc_failed++;
424                         return err;
425                 }
426
427                 /* Unmap the page as we are going to pass it to the stack */
428                 dma_unmap_page(&bp->pdev->dev,
429                                dma_unmap_addr(&old_rx_pg, mapping),
430                                SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
431
432                 /* Add one frag and update the appropriate fields in the skb */
433                 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
434
435                 skb->data_len += frag_len;
436                 skb->truesize += frag_len;
437                 skb->len += frag_len;
438
439                 frag_size -= frag_len;
440         }
441
442         return 0;
443 }
444
445 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
446                            u16 queue, struct eth_end_agg_rx_cqe *cqe,
447                            u16 cqe_idx)
448 {
449         struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
450         struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
451         u8 pad = tpa_info->placement_offset;
452         u16 len = tpa_info->len_on_bd;
453         struct sk_buff *skb = rx_buf->skb;
454         /* alloc new skb */
455         struct sk_buff *new_skb;
456         u8 old_tpa_state = tpa_info->tpa_state;
457
458         tpa_info->tpa_state = BNX2X_TPA_STOP;
459
460         /* If there was an error during the handling of the TPA_START -
461          * drop this aggregation.
462          */
463         if (old_tpa_state == BNX2X_TPA_ERROR)
464                 goto drop;
465
466         /* Try to allocate the new skb */
467         new_skb = netdev_alloc_skb(bp->dev, fp->rx_buf_size);
468
469         /* Unmap skb in the pool anyway, as we are going to change
470            pool entry status to BNX2X_TPA_STOP even if new skb allocation
471            fails. */
472         dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
473                          fp->rx_buf_size, DMA_FROM_DEVICE);
474
475         if (likely(new_skb)) {
476                 prefetch(skb);
477                 prefetch(((char *)(skb)) + L1_CACHE_BYTES);
478
479 #ifdef BNX2X_STOP_ON_ERROR
480                 if (pad + len > fp->rx_buf_size) {
481                         BNX2X_ERR("skb_put is about to fail...  "
482                                   "pad %d  len %d  rx_buf_size %d\n",
483                                   pad, len, fp->rx_buf_size);
484                         bnx2x_panic();
485                         return;
486                 }
487 #endif
488
489                 skb_reserve(skb, pad);
490                 skb_put(skb, len);
491
492                 skb->protocol = eth_type_trans(skb, bp->dev);
493                 skb->ip_summed = CHECKSUM_UNNECESSARY;
494
495                 if (!bnx2x_fill_frag_skb(bp, fp, queue, skb, cqe, cqe_idx)) {
496                         if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
497                                 __vlan_hwaccel_put_tag(skb, tpa_info->vlan_tag);
498                         napi_gro_receive(&fp->napi, skb);
499                 } else {
500                         DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
501                            " - dropping packet!\n");
502                         dev_kfree_skb_any(skb);
503                 }
504
505
506                 /* put new skb in bin */
507                 rx_buf->skb = new_skb;
508
509                 return;
510         }
511
512 drop:
513         /* drop the packet and keep the buffer in the bin */
514         DP(NETIF_MSG_RX_STATUS,
515            "Failed to allocate or map a new skb - dropping packet!\n");
516         fp->eth_q_stats.rx_skb_alloc_failed++;
517 }
518
519 /* Set Toeplitz hash value in the skb using the value from the
520  * CQE (calculated by HW).
521  */
522 static inline void bnx2x_set_skb_rxhash(struct bnx2x *bp, union eth_rx_cqe *cqe,
523                                         struct sk_buff *skb)
524 {
525         /* Set Toeplitz hash from CQE */
526         if ((bp->dev->features & NETIF_F_RXHASH) &&
527             (cqe->fast_path_cqe.status_flags &
528              ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
529                 skb->rxhash =
530                 le32_to_cpu(cqe->fast_path_cqe.rss_hash_result);
531 }
532
533 int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
534 {
535         struct bnx2x *bp = fp->bp;
536         u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
537         u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
538         int rx_pkt = 0;
539
540 #ifdef BNX2X_STOP_ON_ERROR
541         if (unlikely(bp->panic))
542                 return 0;
543 #endif
544
545         /* CQ "next element" is of the size of the regular element,
546            that's why it's ok here */
547         hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
548         if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
549                 hw_comp_cons++;
550
551         bd_cons = fp->rx_bd_cons;
552         bd_prod = fp->rx_bd_prod;
553         bd_prod_fw = bd_prod;
554         sw_comp_cons = fp->rx_comp_cons;
555         sw_comp_prod = fp->rx_comp_prod;
556
557         /* Memory barrier necessary as speculative reads of the rx
558          * buffer can be ahead of the index in the status block
559          */
560         rmb();
561
562         DP(NETIF_MSG_RX_STATUS,
563            "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
564            fp->index, hw_comp_cons, sw_comp_cons);
565
566         while (sw_comp_cons != hw_comp_cons) {
567                 struct sw_rx_bd *rx_buf = NULL;
568                 struct sk_buff *skb;
569                 union eth_rx_cqe *cqe;
570                 struct eth_fast_path_rx_cqe *cqe_fp;
571                 u8 cqe_fp_flags;
572                 enum eth_rx_cqe_type cqe_fp_type;
573                 u16 len, pad;
574
575 #ifdef BNX2X_STOP_ON_ERROR
576                 if (unlikely(bp->panic))
577                         return 0;
578 #endif
579
580                 comp_ring_cons = RCQ_BD(sw_comp_cons);
581                 bd_prod = RX_BD(bd_prod);
582                 bd_cons = RX_BD(bd_cons);
583
584                 /* Prefetch the page containing the BD descriptor
585                    at the producer's index. It will be needed when a new skb is
586                    allocated */
587                 prefetch((void *)(PAGE_ALIGN((unsigned long)
588                                              (&fp->rx_desc_ring[bd_prod])) -
589                                   PAGE_SIZE + 1));
590
591                 cqe = &fp->rx_comp_ring[comp_ring_cons];
592                 cqe_fp = &cqe->fast_path_cqe;
593                 cqe_fp_flags = cqe_fp->type_error_flags;
594                 cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
595
596                 DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
597                    "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
598                    cqe_fp_flags, cqe_fp->status_flags,
599                    le32_to_cpu(cqe_fp->rss_hash_result),
600                    le16_to_cpu(cqe_fp->vlan_tag), le16_to_cpu(cqe_fp->pkt_len));
601
602                 /* is this a slowpath msg? */
603                 if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
604                         bnx2x_sp_event(fp, cqe);
605                         goto next_cqe;
606
607                 /* this is an rx packet */
608                 } else {
609                         rx_buf = &fp->rx_buf_ring[bd_cons];
610                         skb = rx_buf->skb;
611                         prefetch(skb);
612
613                         if (!CQE_TYPE_FAST(cqe_fp_type)) {
614 #ifdef BNX2X_STOP_ON_ERROR
615                                 /* sanity check */
616                                 if (fp->disable_tpa &&
617                                     (CQE_TYPE_START(cqe_fp_type) ||
618                                      CQE_TYPE_STOP(cqe_fp_type)))
619                                         BNX2X_ERR("START/STOP packet while "
620                                                   "disable_tpa type %x\n",
621                                                   CQE_TYPE(cqe_fp_type));
622 #endif
623
624                                 if (CQE_TYPE_START(cqe_fp_type)) {
625                                         u16 queue = cqe_fp->queue_index;
626                                         DP(NETIF_MSG_RX_STATUS,
627                                            "calling tpa_start on queue %d\n",
628                                            queue);
629
630                                         bnx2x_tpa_start(fp, queue, skb,
631                                                         bd_cons, bd_prod,
632                                                         cqe_fp);
633
634                                         /* Set Toeplitz hash for LRO skb */
635                                         bnx2x_set_skb_rxhash(bp, cqe, skb);
636
637                                         goto next_rx;
638
639                                 } else {
640                                         u16 queue =
641                                                 cqe->end_agg_cqe.queue_index;
642                                         DP(NETIF_MSG_RX_STATUS,
643                                            "calling tpa_stop on queue %d\n",
644                                            queue);
645
646                                         bnx2x_tpa_stop(bp, fp, queue,
647                                                        &cqe->end_agg_cqe,
648                                                        comp_ring_cons);
649 #ifdef BNX2X_STOP_ON_ERROR
650                                         if (bp->panic)
651                                                 return 0;
652 #endif
653
654                                         bnx2x_update_sge_prod(fp, cqe_fp);
655                                         goto next_cqe;
656                                 }
657                         }
658                         /* non TPA */
659                         len = le16_to_cpu(cqe_fp->pkt_len);
660                         pad = cqe_fp->placement_offset;
661                         dma_sync_single_for_device(&bp->pdev->dev,
662                                         dma_unmap_addr(rx_buf, mapping),
663                                                        pad + RX_COPY_THRESH,
664                                                        DMA_FROM_DEVICE);
665                         prefetch(((char *)(skb)) + L1_CACHE_BYTES);
666
667                         /* is this an error packet? */
668                         if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
669                                 DP(NETIF_MSG_RX_ERR,
670                                    "ERROR  flags %x  rx packet %u\n",
671                                    cqe_fp_flags, sw_comp_cons);
672                                 fp->eth_q_stats.rx_err_discard_pkt++;
673                                 goto reuse_rx;
674                         }
675
676                         /* Since we don't have a jumbo ring
677                          * copy small packets if mtu > 1500
678                          */
679                         if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
680                             (len <= RX_COPY_THRESH)) {
681                                 struct sk_buff *new_skb;
682
683                                 new_skb = netdev_alloc_skb(bp->dev, len + pad);
684                                 if (new_skb == NULL) {
685                                         DP(NETIF_MSG_RX_ERR,
686                                            "ERROR  packet dropped "
687                                            "because of alloc failure\n");
688                                         fp->eth_q_stats.rx_skb_alloc_failed++;
689                                         goto reuse_rx;
690                                 }
691
692                                 /* aligned copy */
693                                 skb_copy_from_linear_data_offset(skb, pad,
694                                                     new_skb->data + pad, len);
695                                 skb_reserve(new_skb, pad);
696                                 skb_put(new_skb, len);
697
698                                 bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod);
699
700                                 skb = new_skb;
701
702                         } else
703                         if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
704                                 dma_unmap_single(&bp->pdev->dev,
705                                         dma_unmap_addr(rx_buf, mapping),
706                                                  fp->rx_buf_size,
707                                                  DMA_FROM_DEVICE);
708                                 skb_reserve(skb, pad);
709                                 skb_put(skb, len);
710
711                         } else {
712                                 DP(NETIF_MSG_RX_ERR,
713                                    "ERROR  packet dropped because "
714                                    "of alloc failure\n");
715                                 fp->eth_q_stats.rx_skb_alloc_failed++;
716 reuse_rx:
717                                 bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod);
718                                 goto next_rx;
719                         }
720
721                         skb->protocol = eth_type_trans(skb, bp->dev);
722
723                         /* Set Toeplitz hash for a non-LRO skb */
724                         bnx2x_set_skb_rxhash(bp, cqe, skb);
725
726                         skb_checksum_none_assert(skb);
727
728                         if (bp->dev->features & NETIF_F_RXCSUM) {
729
730                                 if (likely(BNX2X_RX_CSUM_OK(cqe)))
731                                         skb->ip_summed = CHECKSUM_UNNECESSARY;
732                                 else
733                                         fp->eth_q_stats.hw_csum_err++;
734                         }
735                 }
736
737                 skb_record_rx_queue(skb, fp->index);
738
739                 if (le16_to_cpu(cqe_fp->pars_flags.flags) &
740                     PARSING_FLAGS_VLAN)
741                         __vlan_hwaccel_put_tag(skb,
742                                                le16_to_cpu(cqe_fp->vlan_tag));
743                 napi_gro_receive(&fp->napi, skb);
744
745
746 next_rx:
747                 rx_buf->skb = NULL;
748
749                 bd_cons = NEXT_RX_IDX(bd_cons);
750                 bd_prod = NEXT_RX_IDX(bd_prod);
751                 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
752                 rx_pkt++;
753 next_cqe:
754                 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
755                 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
756
757                 if (rx_pkt == budget)
758                         break;
759         } /* while */
760
761         fp->rx_bd_cons = bd_cons;
762         fp->rx_bd_prod = bd_prod_fw;
763         fp->rx_comp_cons = sw_comp_cons;
764         fp->rx_comp_prod = sw_comp_prod;
765
766         /* Update producers */
767         bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
768                              fp->rx_sge_prod);
769
770         fp->rx_pkt += rx_pkt;
771         fp->rx_calls++;
772
773         return rx_pkt;
774 }
775
776 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
777 {
778         struct bnx2x_fastpath *fp = fp_cookie;
779         struct bnx2x *bp = fp->bp;
780
781         DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB "
782                          "[fp %d fw_sd %d igusb %d]\n",
783            fp->index, fp->fw_sb_id, fp->igu_sb_id);
784         bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
785
786 #ifdef BNX2X_STOP_ON_ERROR
787         if (unlikely(bp->panic))
788                 return IRQ_HANDLED;
789 #endif
790
791         /* Handle Rx and Tx according to MSI-X vector */
792         prefetch(fp->rx_cons_sb);
793         prefetch(fp->tx_cons_sb);
794         prefetch(&fp->sb_running_index[SM_RX_ID]);
795         napi_schedule(&bnx2x_fp(bp, fp->index, napi));
796
797         return IRQ_HANDLED;
798 }
799
800 /* HW Lock for shared dual port PHYs */
801 void bnx2x_acquire_phy_lock(struct bnx2x *bp)
802 {
803         mutex_lock(&bp->port.phy_mutex);
804
805         if (bp->port.need_hw_lock)
806                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
807 }
808
809 void bnx2x_release_phy_lock(struct bnx2x *bp)
810 {
811         if (bp->port.need_hw_lock)
812                 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
813
814         mutex_unlock(&bp->port.phy_mutex);
815 }
816
817 /* calculates MF speed according to current linespeed and MF configuration */
818 u16 bnx2x_get_mf_speed(struct bnx2x *bp)
819 {
820         u16 line_speed = bp->link_vars.line_speed;
821         if (IS_MF(bp)) {
822                 u16 maxCfg = bnx2x_extract_max_cfg(bp,
823                                                    bp->mf_config[BP_VN(bp)]);
824
825                 /* Calculate the current MAX line speed limit for the MF
826                  * devices
827                  */
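                /* For example, with maxCfg == 50: in SI mode the reported
                 * speed is 50% of the PHY line speed, while in SD mode it is
                 * capped at 50 * 100 = 5000 Mbps.
                 */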
828                 if (IS_MF_SI(bp))
829                         line_speed = (line_speed * maxCfg) / 100;
830                 else { /* SD mode */
831                         u16 vn_max_rate = maxCfg * 100;
832
833                         if (vn_max_rate < line_speed)
834                                 line_speed = vn_max_rate;
835                 }
836         }
837
838         return line_speed;
839 }
840
841 /**
842  * bnx2x_fill_report_data - fill link report data to report
843  *
844  * @bp:         driver handle
845  * @data:       link state to update
846  *
847  * It uses non-atomic bit operations because it is called under the mutex.
848  */
849 static inline void bnx2x_fill_report_data(struct bnx2x *bp,
850                                           struct bnx2x_link_report_data *data)
851 {
852         u16 line_speed = bnx2x_get_mf_speed(bp);
853
854         memset(data, 0, sizeof(*data));
855
856         /* Fill the report data: effective line speed */
857         data->line_speed = line_speed;
858
859         /* Link is down */
860         if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
861                 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
862                           &data->link_report_flags);
863
864         /* Full DUPLEX */
865         if (bp->link_vars.duplex == DUPLEX_FULL)
866                 __set_bit(BNX2X_LINK_REPORT_FD, &data->link_report_flags);
867
868         /* Rx Flow Control is ON */
869         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
870                 __set_bit(BNX2X_LINK_REPORT_RX_FC_ON, &data->link_report_flags);
871
872         /* Tx Flow Control is ON */
873         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
874                 __set_bit(BNX2X_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
875 }
876
877 /**
878  * bnx2x_link_report - report link status to OS.
879  *
880  * @bp:         driver handle
881  *
882  * Calls the __bnx2x_link_report() under the same locking scheme
883  * as the link/PHY state managing code to ensure consistent link
884  * reporting.
885  */
886
887 void bnx2x_link_report(struct bnx2x *bp)
888 {
889         bnx2x_acquire_phy_lock(bp);
890         __bnx2x_link_report(bp);
891         bnx2x_release_phy_lock(bp);
892 }
893
894 /**
895  * __bnx2x_link_report - report link status to OS.
896  *
897  * @bp:         driver handle
898  *
899  * Non-atomic implementation.
900  * Should be called under the phy_lock.
901  */
902 void __bnx2x_link_report(struct bnx2x *bp)
903 {
904         struct bnx2x_link_report_data cur_data;
905
906         /* reread mf_cfg */
907         if (!CHIP_IS_E1(bp))
908                 bnx2x_read_mf_cfg(bp);
909
910         /* Read the current link report info */
911         bnx2x_fill_report_data(bp, &cur_data);
912
913         /* Don't report link down or exactly the same link status twice */
914         if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
915             (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
916                       &bp->last_reported_link.link_report_flags) &&
917              test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
918                       &cur_data.link_report_flags)))
919                 return;
920
921         bp->link_cnt++;
922
923         /* We are going to report new link parameters now -
924          * remember the current data for the next time.
925          */
926         memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));
927
928         if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
929                      &cur_data.link_report_flags)) {
930                 netif_carrier_off(bp->dev);
931                 netdev_err(bp->dev, "NIC Link is Down\n");
932                 return;
933         } else {
934                 netif_carrier_on(bp->dev);
935                 netdev_info(bp->dev, "NIC Link is Up, ");
936                 pr_cont("%d Mbps ", cur_data.line_speed);
937
938                 if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
939                                        &cur_data.link_report_flags))
940                         pr_cont("full duplex");
941                 else
942                         pr_cont("half duplex");
943
944                 /* Handle the FC at the end so that only these flags could
945                  * possibly be set. This way we can easily check whether any
946                  * FC is enabled.
947                  */
948                 if (cur_data.link_report_flags) {
949                         if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
950                                      &cur_data.link_report_flags)) {
951                                 pr_cont(", receive ");
952                                 if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
953                                      &cur_data.link_report_flags))
954                                         pr_cont("& transmit ");
955                         } else {
956                                 pr_cont(", transmit ");
957                         }
958                         pr_cont("flow control ON");
959                 }
960                 pr_cont("\n");
961         }
962 }
963
964 void bnx2x_init_rx_rings(struct bnx2x *bp)
965 {
966         int func = BP_FUNC(bp);
967         int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
968                                               ETH_MAX_AGGREGATION_QUEUES_E1H_E2;
969         u16 ring_prod;
970         int i, j;
971
972         /* Allocate TPA resources */
973         for_each_rx_queue(bp, j) {
974                 struct bnx2x_fastpath *fp = &bp->fp[j];
975
976                 DP(NETIF_MSG_IFUP,
977                    "mtu %d  rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
978
979                 if (!fp->disable_tpa) {
980                         /* Fill the per-aggregation pool */
981                         for (i = 0; i < max_agg_queues; i++) {
982                                 struct bnx2x_agg_info *tpa_info =
983                                         &fp->tpa_info[i];
984                                 struct sw_rx_bd *first_buf =
985                                         &tpa_info->first_buf;
986
987                                 first_buf->skb = netdev_alloc_skb(bp->dev,
988                                                        fp->rx_buf_size);
989                                 if (!first_buf->skb) {
990                                         BNX2X_ERR("Failed to allocate TPA "
991                                                   "skb pool for queue[%d] - "
992                                                   "disabling TPA on this "
993                                                   "queue!\n", j);
994                                         bnx2x_free_tpa_pool(bp, fp, i);
995                                         fp->disable_tpa = 1;
996                                         break;
997                                 }
998                                 dma_unmap_addr_set(first_buf, mapping, 0);
999                                 tpa_info->tpa_state = BNX2X_TPA_STOP;
1000                         }
1001
1002                         /* "next page" elements initialization */
1003                         bnx2x_set_next_page_sgl(fp);
1004
1005                         /* set SGEs bit mask */
1006                         bnx2x_init_sge_ring_bit_mask(fp);
1007
1008                         /* Allocate SGEs and initialize the ring elements */
1009                         for (i = 0, ring_prod = 0;
1010                              i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
1011
1012                                 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
1013                                         BNX2X_ERR("was only able to allocate "
1014                                                   "%d rx sges\n", i);
1015                                         BNX2X_ERR("disabling TPA for "
1016                                                   "queue[%d]\n", j);
1017                                         /* Cleanup already allocated elements */
1018                                         bnx2x_free_rx_sge_range(bp, fp,
1019                                                                 ring_prod);
1020                                         bnx2x_free_tpa_pool(bp, fp,
1021                                                             max_agg_queues);
1022                                         fp->disable_tpa = 1;
1023                                         ring_prod = 0;
1024                                         break;
1025                                 }
1026                                 ring_prod = NEXT_SGE_IDX(ring_prod);
1027                         }
1028
1029                         fp->rx_sge_prod = ring_prod;
1030                 }
1031         }
1032
1033         for_each_rx_queue(bp, j) {
1034                 struct bnx2x_fastpath *fp = &bp->fp[j];
1035
1036                 fp->rx_bd_cons = 0;
1037
1038                 /* Activate BD ring */
1039                 /* Warning!
1040                  * this will generate an interrupt (to the TSTORM);
1041                  * it must only be done after the chip is initialized
1042                  */
1043                 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1044                                      fp->rx_sge_prod);
1045
1046                 if (j != 0)
1047                         continue;
1048
1049                 if (CHIP_IS_E1(bp)) {
1050                         REG_WR(bp, BAR_USTRORM_INTMEM +
1051                                USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
1052                                U64_LO(fp->rx_comp_mapping));
1053                         REG_WR(bp, BAR_USTRORM_INTMEM +
1054                                USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
1055                                U64_HI(fp->rx_comp_mapping));
1056                 }
1057         }
1058 }
1059
1060 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
1061 {
1062         int i;
1063
1064         for_each_tx_queue(bp, i) {
1065                 struct bnx2x_fastpath *fp = &bp->fp[i];
1066
1067                 u16 bd_cons = fp->tx_bd_cons;
1068                 u16 sw_prod = fp->tx_pkt_prod;
1069                 u16 sw_cons = fp->tx_pkt_cons;
1070
1071                 while (sw_cons != sw_prod) {
1072                         bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
1073                         sw_cons++;
1074                 }
1075         }
1076 }
1077
1078 static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
1079 {
1080         struct bnx2x *bp = fp->bp;
1081         int i;
1082
1083         /* ring wasn't allocated */
1084         if (fp->rx_buf_ring == NULL)
1085                 return;
1086
1087         for (i = 0; i < NUM_RX_BD; i++) {
1088                 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
1089                 struct sk_buff *skb = rx_buf->skb;
1090
1091                 if (skb == NULL)
1092                         continue;
1093                 dma_unmap_single(&bp->pdev->dev,
1094                                  dma_unmap_addr(rx_buf, mapping),
1095                                  fp->rx_buf_size, DMA_FROM_DEVICE);
1096
1097                 rx_buf->skb = NULL;
1098                 dev_kfree_skb(skb);
1099         }
1100 }
1101
1102 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1103 {
1104         int j;
1105
1106         for_each_rx_queue(bp, j) {
1107                 struct bnx2x_fastpath *fp = &bp->fp[j];
1108
1109                 bnx2x_free_rx_bds(fp);
1110
1111                 if (!fp->disable_tpa)
1112                         bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
1113                                             ETH_MAX_AGGREGATION_QUEUES_E1 :
1114                                             ETH_MAX_AGGREGATION_QUEUES_E1H_E2);
1115         }
1116 }
1117
1118 void bnx2x_free_skbs(struct bnx2x *bp)
1119 {
1120         bnx2x_free_tx_skbs(bp);
1121         bnx2x_free_rx_skbs(bp);
1122 }
1123
1124 void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
1125 {
1126         /* load old values */
1127         u32 mf_cfg = bp->mf_config[BP_VN(bp)];
1128
1129         if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
1130                 /* leave all but MAX value */
1131                 mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
1132
1133                 /* set new MAX value */
1134                 mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
1135                                 & FUNC_MF_CFG_MAX_BW_MASK;
1136
1137                 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
1138         }
1139 }
1140
1141 /**
1142  * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
1143  *
1144  * @bp:         driver handle
1145  * @nvecs:      number of vectors to be released
1146  */
1147 static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
1148 {
1149         int i, offset = 0;
1150
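        /* Vector layout (mirrors bnx2x_enable_msix()): slowpath first, then
         * the CNIC vector when BCM_CNIC is set, then one vector per ETH queue.
         */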
1151         if (nvecs == offset)
1152                 return;
1153         free_irq(bp->msix_table[offset].vector, bp->dev);
1154         DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
1155            bp->msix_table[offset].vector);
1156         offset++;
1157 #ifdef BCM_CNIC
1158         if (nvecs == offset)
1159                 return;
1160         offset++;
1161 #endif
1162
1163         for_each_eth_queue(bp, i) {
1164                 if (nvecs == offset)
1165                         return;
1166                 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d "
1167                    "irq\n", i, bp->msix_table[offset].vector);
1168
1169                 free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
1170         }
1171 }
1172
1173 void bnx2x_free_irq(struct bnx2x *bp)
1174 {
1175         if (bp->flags & USING_MSIX_FLAG)
1176                 bnx2x_free_msix_irqs(bp, BNX2X_NUM_ETH_QUEUES(bp) +
1177                                      CNIC_CONTEXT_USE + 1);
1178         else if (bp->flags & USING_MSI_FLAG)
1179                 free_irq(bp->pdev->irq, bp->dev);
1180         else
1181                 free_irq(bp->pdev->irq, bp->dev);
1182 }
1183
1184 int bnx2x_enable_msix(struct bnx2x *bp)
1185 {
1186         int msix_vec = 0, i, rc, req_cnt;
1187
1188         bp->msix_table[msix_vec].entry = msix_vec;
1189         DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n",
1190            bp->msix_table[0].entry);
1191         msix_vec++;
1192
1193 #ifdef BCM_CNIC
1194         bp->msix_table[msix_vec].entry = msix_vec;
1195         DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d (CNIC)\n",
1196            bp->msix_table[msix_vec].entry, bp->msix_table[msix_vec].entry);
1197         msix_vec++;
1198 #endif
1199         for_each_eth_queue(bp, i) {
1200                 bp->msix_table[msix_vec].entry = msix_vec;
1201                 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
1202                    "(fastpath #%u)\n", msix_vec, msix_vec, i);
1203                 msix_vec++;
1204         }
1205
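        /* One vector for the slowpath, CNIC_CONTEXT_USE for CNIC and one per
         * ETH queue, matching the msix_table entries filled above.
         */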
1206         req_cnt = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_CONTEXT_USE + 1;
1207
1208         rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], req_cnt);
1209
1210         /*
1211          * reconfigure number of tx/rx queues according to available
1212          * MSI-X vectors
1213          */
1214         if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
1215                 /* how many fewer vectors will we have? */
1216                 int diff = req_cnt - rc;
1217
1218                 DP(NETIF_MSG_IFUP,
1219                    "Trying to use less MSI-X vectors: %d\n", rc);
1220
1221                 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
1222
1223                 if (rc) {
1224                         DP(NETIF_MSG_IFUP,
1225                            "MSI-X is not attainable  rc %d\n", rc);
1226                         return rc;
1227                 }
1228                 /*
1229                  * decrease number of queues by number of unallocated entries
1230                  */
1231                 bp->num_queues -= diff;
1232
1233                 DP(NETIF_MSG_IFUP, "New queue configuration set: %d\n",
1234                                   bp->num_queues);
1235         } else if (rc) {
1236                 /* fall back to INTx if there is not enough memory */
1237                 if (rc == -ENOMEM)
1238                         bp->flags |= DISABLE_MSI_FLAG;
1239                 DP(NETIF_MSG_IFUP, "MSI-X is not attainable  rc %d\n", rc);
1240                 return rc;
1241         }
1242
1243         bp->flags |= USING_MSIX_FLAG;
1244
1245         return 0;
1246 }
1247
1248 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1249 {
1250         int i, rc, offset = 0;
1251
1252         rc = request_irq(bp->msix_table[offset++].vector,
1253                          bnx2x_msix_sp_int, 0,
1254                          bp->dev->name, bp->dev);
1255         if (rc) {
1256                 BNX2X_ERR("request sp irq failed\n");
1257                 return -EBUSY;
1258         }
1259
1260 #ifdef BCM_CNIC
1261         offset++;
1262 #endif
1263         for_each_eth_queue(bp, i) {
1264                 struct bnx2x_fastpath *fp = &bp->fp[i];
1265                 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1266                          bp->dev->name, i);
1267
1268                 rc = request_irq(bp->msix_table[offset].vector,
1269                                  bnx2x_msix_fp_int, 0, fp->name, fp);
1270                 if (rc) {
1271                         BNX2X_ERR("request fp #%d irq (%d) failed  rc %d\n", i,
1272                               bp->msix_table[offset].vector, rc);
1273                         bnx2x_free_msix_irqs(bp, offset);
1274                         return -EBUSY;
1275                 }
1276
1277                 offset++;
1278         }
1279
1280         i = BNX2X_NUM_ETH_QUEUES(bp);
1281         offset = 1 + CNIC_CONTEXT_USE;
1282         netdev_info(bp->dev, "using MSI-X  IRQs: sp %d  fp[%d] %d"
1283                " ... fp[%d] %d\n",
1284                bp->msix_table[0].vector,
1285                0, bp->msix_table[offset].vector,
1286                i - 1, bp->msix_table[offset + i - 1].vector);
1287
1288         return 0;
1289 }
1290
1291 int bnx2x_enable_msi(struct bnx2x *bp)
1292 {
1293         int rc;
1294
1295         rc = pci_enable_msi(bp->pdev);
1296         if (rc) {
1297                 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
1298                 return -1;
1299         }
1300         bp->flags |= USING_MSI_FLAG;
1301
1302         return 0;
1303 }
1304
1305 static int bnx2x_req_irq(struct bnx2x *bp)
1306 {
1307         unsigned long flags;
1308         int rc;
1309
1310         if (bp->flags & USING_MSI_FLAG)
1311                 flags = 0;
1312         else
1313                 flags = IRQF_SHARED;
1314
1315         rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
1316                          bp->dev->name, bp->dev);
1317         return rc;
1318 }
1319
1320 static inline int bnx2x_setup_irqs(struct bnx2x *bp)
1321 {
1322         int rc = 0;
1323         if (bp->flags & USING_MSIX_FLAG) {
1324                 rc = bnx2x_req_msix_irqs(bp);
1325                 if (rc)
1326                         return rc;
1327         } else {
1328                 bnx2x_ack_int(bp);
1329                 rc = bnx2x_req_irq(bp);
1330                 if (rc) {
1331                         BNX2X_ERR("IRQ request failed  rc %d, aborting\n", rc);
1332                         return rc;
1333                 }
1334                 if (bp->flags & USING_MSI_FLAG) {
1335                         bp->dev->irq = bp->pdev->irq;
1336                         netdev_info(bp->dev, "using MSI  IRQ %d\n",
1337                                bp->pdev->irq);
1338                 }
1339         }
1340
1341         return 0;
1342 }
1343
1344 static inline void bnx2x_napi_enable(struct bnx2x *bp)
1345 {
1346         int i;
1347
1348         for_each_rx_queue(bp, i)
1349                 napi_enable(&bnx2x_fp(bp, i, napi));
1350 }
1351
1352 static inline void bnx2x_napi_disable(struct bnx2x *bp)
1353 {
1354         int i;
1355
1356         for_each_rx_queue(bp, i)
1357                 napi_disable(&bnx2x_fp(bp, i, napi));
1358 }
1359
1360 void bnx2x_netif_start(struct bnx2x *bp)
1361 {
1362         if (netif_running(bp->dev)) {
1363                 bnx2x_napi_enable(bp);
1364                 bnx2x_int_enable(bp);
1365                 if (bp->state == BNX2X_STATE_OPEN)
1366                         netif_tx_wake_all_queues(bp->dev);
1367         }
1368 }
1369
1370 void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1371 {
1372         bnx2x_int_disable_sync(bp, disable_hw);
1373         bnx2x_napi_disable(bp);
1374 }
1375
1376 u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
1377 {
1378 #ifdef BCM_CNIC
1379         struct bnx2x *bp = netdev_priv(dev);
1380         if (NO_FCOE(bp))
1381                 return skb_tx_hash(dev, skb);
1382         else {
1383                 struct ethhdr *hdr = (struct ethhdr *)skb->data;
1384                 u16 ether_type = ntohs(hdr->h_proto);
1385
1386                 /* Skip VLAN tag if present */
1387                 if (ether_type == ETH_P_8021Q) {
1388                         struct vlan_ethhdr *vhdr =
1389                                 (struct vlan_ethhdr *)skb->data;
1390
1391                         ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1392                 }
1393
1394                 /* If ethertype is FCoE or FIP - use FCoE ring */
1395                 if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
1396                         return bnx2x_fcoe(bp, index);
1397         }
1398 #endif
1399         /* Select a non-FCoE queue: if FCoE is enabled, exclude the FCoE L2 ring
1400          */
1401         return __skb_tx_hash(dev, skb,
1402                         dev->real_num_tx_queues - FCOE_CONTEXT_USE);
1403 }
1404
1405 void bnx2x_set_num_queues(struct bnx2x *bp)
1406 {
1407         switch (bp->multi_mode) {
1408         case ETH_RSS_MODE_DISABLED:
1409                 bp->num_queues = 1;
1410                 break;
1411         case ETH_RSS_MODE_REGULAR:
1412                 bp->num_queues = bnx2x_calc_num_queues(bp);
1413                 break;
1414
1415         default:
1416                 bp->num_queues = 1;
1417                 break;
1418         }
1419
1420         /* Add special queues */
1421         bp->num_queues += NONE_ETH_CONTEXT_USE;
1422 }
1423
1424 static inline int bnx2x_set_real_num_queues(struct bnx2x *bp)
1425 {
1426         int rc, num = bp->num_queues;
1427
1428 #ifdef BCM_CNIC
1429         if (NO_FCOE(bp))
1430                 num -= FCOE_CONTEXT_USE;
1431
1432 #endif
1433         netif_set_real_num_tx_queues(bp->dev, num);
1434         rc = netif_set_real_num_rx_queues(bp->dev, num);
1435         return rc;
1436 }
1437
1438 static inline void bnx2x_set_rx_buf_size(struct bnx2x *bp)
1439 {
1440         int i;
1441
1442         for_each_queue(bp, i) {
1443                 struct bnx2x_fastpath *fp = &bp->fp[i];
1444
1445                 /* Always use a mini-jumbo MTU for the FCoE L2 ring */
1446                 if (IS_FCOE_IDX(i))
1447                         /*
1448                          * Although there are no IP frames expected to arrive on
1449                          * this ring, we still want to add an
1450                          * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
1451                          * overrun attack.
1452                          */
1453                         fp->rx_buf_size =
1454                                 BNX2X_FCOE_MINI_JUMBO_MTU + ETH_OVREHEAD +
1455                                 BNX2X_FW_RX_ALIGN + IP_HEADER_ALIGNMENT_PADDING;
1456                 else
1457                         fp->rx_buf_size =
1458                                 bp->dev->mtu + ETH_OVREHEAD +
1459                                 BNX2X_FW_RX_ALIGN + IP_HEADER_ALIGNMENT_PADDING;
1460         }
1461 }
1462
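/* Build the initial RSS indirection table (client IDs spread round-robin
 * over the ETH queues) and configure the RSS engine for this PF.
 */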
1463 static inline int bnx2x_init_rss_pf(struct bnx2x *bp)
1464 {
1465         int i;
1466         u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0};
1467         u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
1468
1469         /*
1470          * Prepare the initial contents of the indirection table if RSS is
1471          * enabled
1472          */
1473         if (bp->multi_mode != ETH_RSS_MODE_DISABLED) {
1474                 for (i = 0; i < sizeof(ind_table); i++)
1475                         ind_table[i] =
1476                                 bp->fp->cl_id + (i % num_eth_queues);
1477         }
1478
1479         /*
1480          * For 57710 and 57711 SEARCHER configuration (rss_keys) is
1481          * per-port, so if explicit configuration is needed, do it only
1482          * for a PMF.
1483          *
1484          * For 57712 and newer, on the other hand, it's a per-function
1485          * configuration.
1486          */
1487         return bnx2x_config_rss_pf(bp, ind_table,
1488                                    bp->port.pmf || !CHIP_IS_E1x(bp));
1489 }
1490
1491 int bnx2x_config_rss_pf(struct bnx2x *bp, u8 *ind_table, bool config_hash)
1492 {
1493         struct bnx2x_config_rss_params params = {0};
1494         int i;
1495
1496         /* Although RSS is meaningless when there is a single HW queue, we
1497          * still need it enabled in order to have HW Rx hash generated.
1498          *
1499          * if (!is_eth_multi(bp))
1500          *      bp->multi_mode = ETH_RSS_MODE_DISABLED;
1501          */
1502
1503         params.rss_obj = &bp->rss_conf_obj;
1504
1505         __set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
1506
1507         /* RSS mode */
1508         switch (bp->multi_mode) {
1509         case ETH_RSS_MODE_DISABLED:
1510                 __set_bit(BNX2X_RSS_MODE_DISABLED, &params.rss_flags);
1511                 break;
1512         case ETH_RSS_MODE_REGULAR:
1513                 __set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
1514                 break;
1515         case ETH_RSS_MODE_VLAN_PRI:
1516                 __set_bit(BNX2X_RSS_MODE_VLAN_PRI, &params.rss_flags);
1517                 break;
1518         case ETH_RSS_MODE_E1HOV_PRI:
1519                 __set_bit(BNX2X_RSS_MODE_E1HOV_PRI, &params.rss_flags);
1520                 break;
1521         case ETH_RSS_MODE_IP_DSCP:
1522                 __set_bit(BNX2X_RSS_MODE_IP_DSCP, &params.rss_flags);
1523                 break;
1524         default:
1525                 BNX2X_ERR("Unknown multi_mode: %d\n", bp->multi_mode);
1526                 return -EINVAL;
1527         }
1528
1529         /* If RSS is enabled */
1530         if (bp->multi_mode != ETH_RSS_MODE_DISABLED) {
1531                 /* RSS configuration */
1532                 __set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
1533                 __set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
1534                 __set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
1535                 __set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
1536
1537                 /* Hash bits */
1538                 params.rss_result_mask = MULTI_MASK;
1539
1540                 memcpy(params.ind_table, ind_table, sizeof(params.ind_table));
1541
1542                 if (config_hash) {
1543                         /* RSS keys */
1544                         for (i = 0; i < sizeof(params.rss_key) / 4; i++)
1545                                 params.rss_key[i] = random32();
1546
1547                         __set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
1548                 }
1549         }
1550
1551         return bnx2x_config_rss(bp, &params);
1552 }
1553
1554 static inline int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
1555 {
1556         struct bnx2x_func_state_params func_params = {0};
1557
1558         /* Prepare parameters for function state transitions */
1559         __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
1560
1561         func_params.f_obj = &bp->func_obj;
1562         func_params.cmd = BNX2X_F_CMD_HW_INIT;
1563
1564         func_params.params.hw_init.load_phase = load_code;
1565
1566         return bnx2x_func_state_change(bp, &func_params);
1567 }
1568
1569 /*
1570  * Cleans the objects that have internal lists without sending
1571  * ramrods. Should be run when interrupts are disabled.
1572  */
1573 static void bnx2x_squeeze_objects(struct bnx2x *bp)
1574 {
1575         int rc;
1576         unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
1577         struct bnx2x_mcast_ramrod_params rparam = {0};
1578         struct bnx2x_vlan_mac_obj *mac_obj = &bp->fp->mac_obj;
1579
1580         /***************** Cleanup MACs' object first *************************/
1581
1582         /* Wait for completion of the requested commands */
1583         __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
1584         /* Perform a dry cleanup */
1585         __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
1586
1587         /* Clean ETH primary MAC */
1588         __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
1589         rc = mac_obj->delete_all(bp, &bp->fp->mac_obj, &vlan_mac_flags,
1590                                  &ramrod_flags);
1591         if (rc != 0)
1592                 BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
1593
1594         /* Cleanup UC list */
1595         vlan_mac_flags = 0;
1596         __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
1597         rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
1598                                  &ramrod_flags);
1599         if (rc != 0)
1600                 BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
1601
1602         /***************** Now clean mcast object *****************************/
1603         rparam.mcast_obj = &bp->mcast_obj;
1604         __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
1605
1606         /* Add a DEL command... */
1607         rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
1608         if (rc < 0)
1609                 BNX2X_ERR("Failed to add a new DEL command to a multi-cast "
1610                           "object: %d\n", rc);
1611
1612         /* ...and wait until all pending commands are cleared */
1613         rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
1614         while (rc != 0) {
1615                 if (rc < 0) {
1616                         BNX2X_ERR("Failed to clean multi-cast object: %d\n",
1617                                   rc);
1618                         return;
1619                 }
1620
1621                 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
1622         }
1623 }
1624
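/* On a load failure, mark the device as errored and jump to the matching
 * unwind label; with BNX2X_STOP_ON_ERROR the driver's panic flag is set and
 * -EBUSY is returned instead of unwinding.
 */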
1625 #ifndef BNX2X_STOP_ON_ERROR
1626 #define LOAD_ERROR_EXIT(bp, label) \
1627         do { \
1628                 (bp)->state = BNX2X_STATE_ERROR; \
1629                 goto label; \
1630         } while (0)
1631 #else
1632 #define LOAD_ERROR_EXIT(bp, label) \
1633         do { \
1634                 (bp)->state = BNX2X_STATE_ERROR; \
1635                 (bp)->panic = 1; \
1636                 return -EBUSY; \
1637         } while (0)
1638 #endif
1639
1640 /* must be called with rtnl_lock */
1641 int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
1642 {
1643         int port = BP_PORT(bp);
1644         u32 load_code;
1645         int i, rc;
1646
1647 #ifdef BNX2X_STOP_ON_ERROR
1648         if (unlikely(bp->panic))
1649                 return -EPERM;
1650 #endif
1651
1652         bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
1653
1654         /* Set the initial link reported state to link down */
1655         bnx2x_acquire_phy_lock(bp);
1656         memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
1657         __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1658                 &bp->last_reported_link.link_report_flags);
1659         bnx2x_release_phy_lock(bp);
1660
1661         /* must be called before memory allocation and HW init */
1662         bnx2x_ilt_set_info(bp);
1663
1664         /* zero fastpath structures preserving invariants like napi which are
1665          * allocated only once
1666          */
1667         for_each_queue(bp, i)
1668                 bnx2x_bz_fp(bp, i);
1669
1670         /* Set the receive queues buffer size */
1671         bnx2x_set_rx_buf_size(bp);
1672
1673         /*
1674          * set the tpa flag for each queue. The tpa flag determines the minimal
1675          * queue size, so it must be set prior to queue memory allocation
1676          */
1677         for_each_queue(bp, i)
1678                 bnx2x_fp(bp, i, disable_tpa) =
1679                                         ((bp->flags & TPA_ENABLE_FLAG) == 0);
1680
1681 #ifdef BCM_CNIC
1682         /* We don't want TPA on FCoE L2 ring */
1683         bnx2x_fcoe(bp, disable_tpa) = 1;
1684 #endif
1685
1686         if (bnx2x_alloc_mem(bp))
1687                 return -ENOMEM;
1688
1689         /* Since bnx2x_alloc_mem() may update
1690          * bp->num_queues, bnx2x_set_real_num_queues() must always
1691          * come after it.
1692          */
1693         rc = bnx2x_set_real_num_queues(bp);
1694         if (rc) {
1695                 BNX2X_ERR("Unable to set real_num_queues\n");
1696                 LOAD_ERROR_EXIT(bp, load_error0);
1697         }
1698
1699         bnx2x_napi_enable(bp);
1700
1701         /* Send LOAD_REQUEST command to MCP
1702          * Returns the type of LOAD command:
1703          * if it is the first port to be initialized,
1704          * common blocks should be initialized; otherwise - not
1705          */
1706         if (!BP_NOMCP(bp)) {
1707                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0);
1708                 if (!load_code) {
1709                         BNX2X_ERR("MCP response failure, aborting\n");
1710                         rc = -EBUSY;
1711                         LOAD_ERROR_EXIT(bp, load_error1);
1712                 }
1713                 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
1714                         rc = -EBUSY; /* other port in diagnostic mode */
1715                         LOAD_ERROR_EXIT(bp, load_error1);
1716                 }
1717
1718         } else {
1719                 int path = BP_PATH(bp);
1720
1721                 DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d]      %d, %d, %d\n",
1722                    path, load_count[path][0], load_count[path][1],
1723                    load_count[path][2]);
1724                 load_count[path][0]++;
1725                 load_count[path][1 + port]++;
1726                 DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d]  %d, %d, %d\n",
1727                    path, load_count[path][0], load_count[path][1],
1728                    load_count[path][2]);
1729                 if (load_count[path][0] == 1)
1730                         load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
1731                 else if (load_count[path][1 + port] == 1)
1732                         load_code = FW_MSG_CODE_DRV_LOAD_PORT;
1733                 else
1734                         load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
1735         }
1736
1737         if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
1738             (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
1739             (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
1740                 bp->port.pmf = 1;
1741         else
1742                 bp->port.pmf = 0;
1743         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
1744
1745         /* Init Function state controlling object */
1746         bnx2x__init_func_obj(bp);
1747
1748         /* Initialize HW */
1749         rc = bnx2x_init_hw(bp, load_code);
1750         if (rc) {
1751                 BNX2X_ERR("HW init failed, aborting\n");
1752                 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
1753                 LOAD_ERROR_EXIT(bp, load_error2);
1754         }
1755
1756         /* Connect to IRQs */
1757         rc = bnx2x_setup_irqs(bp);
1758         if (rc) {
1759                 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
1760                 LOAD_ERROR_EXIT(bp, load_error2);
1761         }
1762
1763         /* Setup NIC internals and enable interrupts */
1764         bnx2x_nic_init(bp, load_code);
1765
1766         /* Init per-function objects */
1767         bnx2x_init_bp_objs(bp);
1768
1769         if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
1770             (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
1771             (bp->common.shmem2_base)) {
1772                 if (SHMEM2_HAS(bp, dcc_support))
1773                         SHMEM2_WR(bp, dcc_support,
1774                                   (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
1775                                    SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
1776         }
1777
1778         bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
1779         rc = bnx2x_func_start(bp);
1780         if (rc) {
1781                 BNX2X_ERR("Function start failed!\n");
1782                 LOAD_ERROR_EXIT(bp, load_error3);
1783         }
1784
1785         /* Send LOAD_DONE command to MCP */
1786         if (!BP_NOMCP(bp)) {
1787                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
1788                 if (!load_code) {
1789                         BNX2X_ERR("MCP response failure, aborting\n");
1790                         rc = -EBUSY;
1791                         LOAD_ERROR_EXIT(bp, load_error3);
1792                 }
1793         }
1794
1795         rc = bnx2x_setup_leading(bp);
1796         if (rc) {
1797                 BNX2X_ERR("Setup leading failed!\n");
1798                 LOAD_ERROR_EXIT(bp, load_error3);
1799         }
1800
1801 #ifdef BCM_CNIC
1802         /* Enable Timer scan */
1803         REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
1804 #endif
1805
1806         for_each_nondefault_queue(bp, i) {
1807                 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
1808                 if (rc)
1809                         LOAD_ERROR_EXIT(bp, load_error4);
1810         }
1811
1812         rc = bnx2x_init_rss_pf(bp);
1813         if (rc)
1814                 LOAD_ERROR_EXIT(bp, load_error4);
1815
1816         /* Now that the clients are configured we are ready to work */
1817         bp->state = BNX2X_STATE_OPEN;
1818
1819         /* Configure a ucast MAC */
1820         rc = bnx2x_set_eth_mac(bp, true);
1821         if (rc)
1822                 LOAD_ERROR_EXIT(bp, load_error4);
1823
1824         if (bp->pending_max) {
1825                 bnx2x_update_max_mf_config(bp, bp->pending_max);
1826                 bp->pending_max = 0;
1827         }
1828
1829         if (bp->port.pmf)
1830                 bnx2x_initial_phy_init(bp, load_mode);
1831
1832         /* Start fast path */
1833
1834         /* Initialize Rx filter. */
1835         netif_addr_lock_bh(bp->dev);
1836         bnx2x_set_rx_mode(bp->dev);
1837         netif_addr_unlock_bh(bp->dev);
1838
1839         /* Start the Tx */
1840         switch (load_mode) {
1841         case LOAD_NORMAL:
1842                 /* Tx queues only need to be re-enabled */
1843                 netif_tx_wake_all_queues(bp->dev);
1844                 break;
1845
1846         case LOAD_OPEN:
1847                 netif_tx_start_all_queues(bp->dev);
1848                 smp_mb__after_clear_bit();
1849                 break;
1850
1851         case LOAD_DIAG:
1852                 bp->state = BNX2X_STATE_DIAG;
1853                 break;
1854
1855         default:
1856                 break;
1857         }
1858
1859         if (!bp->port.pmf)
1860                 bnx2x__link_status_update(bp);
1861
1862         /* start the timer */
1863         mod_timer(&bp->timer, jiffies + bp->current_interval);
1864
1865 #ifdef BCM_CNIC
1866         bnx2x_setup_cnic_irq_info(bp);
1867         if (bp->state == BNX2X_STATE_OPEN)
1868                 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
1869 #endif
1870         bnx2x_inc_load_cnt(bp);
1871
1872         /* Wait for all pending SP commands to complete */
1873         if (!bnx2x_wait_sp_comp(bp, ~0x0UL)) {
1874                 BNX2X_ERR("Timeout waiting for SP elements to complete\n");
1875                 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
1876                 return -EBUSY;
1877         }
1878
1879         bnx2x_dcbx_init(bp);
1880         return 0;
1881
1882 #ifndef BNX2X_STOP_ON_ERROR
1883 load_error4:
1884 #ifdef BCM_CNIC
1885         /* Disable Timer scan */
1886         REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
1887 #endif
1888 load_error3:
1889         bnx2x_int_disable_sync(bp, 1);
1890
1891         /* Clean queueable objects */
1892         bnx2x_squeeze_objects(bp);
1893
1894         /* Free SKBs, SGEs, TPA pool and driver internals */
1895         bnx2x_free_skbs(bp);
1896         for_each_rx_queue(bp, i)
1897                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
1898
1899         /* Release IRQs */
1900         bnx2x_free_irq(bp);
1901 load_error2:
1902         if (!BP_NOMCP(bp)) {
1903                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
1904                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
1905         }
1906
1907         bp->port.pmf = 0;
1908 load_error1:
1909         bnx2x_napi_disable(bp);
1910 load_error0:
1911         bnx2x_free_mem(bp);
1912
1913         return rc;
1914 #endif /* ! BNX2X_STOP_ON_ERROR */
1915 }
1916
1917 /* must be called with rtnl_lock */
1918 int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
1919 {
1920         int i;
1921
1922         if (bp->state == BNX2X_STATE_CLOSED) {
1923                 /* Interface has been removed - nothing to recover */
1924                 bp->recovery_state = BNX2X_RECOVERY_DONE;
1925                 bp->is_leader = 0;
1926                 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
1927                 smp_wmb();
1928
1929                 return -EINVAL;
1930         }
1931
1932 #ifdef BCM_CNIC
1933         bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
1934 #endif
1935         bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
1936         smp_mb();
1937
1938         bp->rx_mode = BNX2X_RX_MODE_NONE;
1939
1940         /* Stop Tx */
1941         bnx2x_tx_disable(bp);
1942
1943         del_timer_sync(&bp->timer);
1944
1945         /* Set ALWAYS_ALIVE bit in shmem */
1946         bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
1947
1948         bnx2x_drv_pulse(bp);
1949
1950         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
1951
1952         /* Cleanup the chip if needed */
1953         if (unload_mode != UNLOAD_RECOVERY)
1954                 bnx2x_chip_cleanup(bp, unload_mode);
1955         else {
1956                 /* Disable HW interrupts, NAPI and Tx */
1957                 bnx2x_netif_stop(bp, 1);
1958
1959                 /* Release IRQs */
1960                 bnx2x_free_irq(bp);
1961         }
1962
1963         /*
1964          * At this stage no more interrupts will arrive, so we may safely clean
1965          * the queueable objects here in case they failed to get cleaned so far.
1966          */
1967         bnx2x_squeeze_objects(bp);
1968
1969         bp->port.pmf = 0;
1970
1971         /* Free SKBs, SGEs, TPA pool and driver internals */
1972         bnx2x_free_skbs(bp);
1973         for_each_rx_queue(bp, i)
1974                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
1975
1976         bnx2x_free_mem(bp);
1977
1978         bp->state = BNX2X_STATE_CLOSED;
1979
1980         /* The last driver to unload must disable the "close the gate" functionality
1981          * if there is no parity attention or "process kill" pending.
1982          */
1983         if ((!bnx2x_dec_load_cnt(bp)) && (!bnx2x_chk_parity_attn(bp)) &&
1984             bnx2x_reset_is_done(bp))
1985                 bnx2x_disable_close_the_gate(bp);
1986
1987         /* Reset the MCP mailbox sequence if there is an ongoing recovery */
1988         if (unload_mode == UNLOAD_RECOVERY)
1989                 bp->fw_seq = 0;
1990
1991         return 0;
1992 }
1993
1994 int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
1995 {
1996         u16 pmcsr;
1997
1998         /* If there is no power capability, silently succeed */
1999         if (!bp->pm_cap) {
2000                 DP(NETIF_MSG_HW, "No power capability. Breaking.\n");
2001                 return 0;
2002         }
2003
2004         pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
2005
2006         switch (state) {
2007         case PCI_D0:
2008                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2009                                       ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
2010                                        PCI_PM_CTRL_PME_STATUS));
2011
2012                 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
2013                         /* delay required during transition out of D3hot */
2014                         msleep(20);
2015                 break;
2016
2017         case PCI_D3hot:
2018                 /* If there are other clients above, don't
2019                    shut down the power */
2020                 if (atomic_read(&bp->pdev->enable_cnt) != 1)
2021                         return 0;
2022                 /* Don't shut down the power for emulation and FPGA */
2023                 if (CHIP_REV_IS_SLOW(bp))
2024                         return 0;
2025
2026                 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
2027                 pmcsr |= 3;
2028
2029                 if (bp->wol)
2030                         pmcsr |= PCI_PM_CTRL_PME_ENABLE;
2031
2032                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2033                                       pmcsr);
2034
2035                 /* No more memory access after this point until
2036                  * the device is brought back to D0.
2037                  */
2038                 break;
2039
2040         default:
2041                 return -EINVAL;
2042         }
2043         return 0;
2044 }
2045
2046 /*
2047  * net_device service functions
2048  */
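/* NAPI poll handler: service Tx completions and Rx work for one fastpath
 * until the budget is exhausted or no work is left, then acknowledge the
 * status block to re-enable the interrupt.
 */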
2049 int bnx2x_poll(struct napi_struct *napi, int budget)
2050 {
2051         int work_done = 0;
2052         struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
2053                                                  napi);
2054         struct bnx2x *bp = fp->bp;
2055
2056         while (1) {
2057 #ifdef BNX2X_STOP_ON_ERROR
2058                 if (unlikely(bp->panic)) {
2059                         napi_complete(napi);
2060                         return 0;
2061                 }
2062 #endif
2063
2064                 if (bnx2x_has_tx_work(fp))
2065                         bnx2x_tx_int(fp);
2066
2067                 if (bnx2x_has_rx_work(fp)) {
2068                         work_done += bnx2x_rx_int(fp, budget - work_done);
2069
2070                         /* must not complete if we consumed full budget */
2071                         if (work_done >= budget)
2072                                 break;
2073                 }
2074
2075                 /* Fall out from the NAPI loop if needed */
2076                 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
2077 #ifdef BCM_CNIC
2078                         /* No need to update the SB for the FCoE L2 ring since
2079                          * it's connected to the default SB and the SB
2080                          * was already updated when NAPI was scheduled.
2081                          */
2082                         if (IS_FCOE_FP(fp)) {
2083                                 napi_complete(napi);
2084                                 break;
2085                         }
2086 #endif
2087
2088                         bnx2x_update_fpsb_idx(fp);
2089                         /* bnx2x_has_rx_work() reads the status block,
2090                          * thus we need to ensure that status block indices
2091                          * have been actually read (bnx2x_update_fpsb_idx)
2092                          * prior to this check (bnx2x_has_rx_work) so that
2093                          * we won't write the "newer" value of the status block
2094                          * to IGU (if there was a DMA right after
2095                          * bnx2x_has_rx_work and if there is no rmb, the memory
2096                          * reading (bnx2x_update_fpsb_idx) may be postponed
2097                          * to right before bnx2x_ack_sb). In this case there
2098                          * will never be another interrupt until there is
2099                          * another update of the status block, while there
2100                          * is still unhandled work.
2101                          */
2102                         rmb();
2103
2104                         if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
2105                                 napi_complete(napi);
2106                                 /* Re-enable interrupts */
2107                                 DP(NETIF_MSG_HW,
2108                                    "Update index to %d\n", fp->fp_hc_idx);
2109                                 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
2110                                              le16_to_cpu(fp->fp_hc_idx),
2111                                              IGU_INT_ENABLE, 1);
2112                                 break;
2113                         }
2114                 }
2115         }
2116
2117         return work_done;
2118 }
2119
2120 /* we split the first BD into headers and data BDs
2121  * to ease the pain of our fellow microcode engineers;
2122  * we use one mapping for both BDs.
2123  * So far this has only been observed to happen
2124  * in Other Operating Systems(TM)
2125  */
2126 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
2127                                    struct bnx2x_fastpath *fp,
2128                                    struct sw_tx_bd *tx_buf,
2129                                    struct eth_tx_start_bd **tx_bd, u16 hlen,
2130                                    u16 bd_prod, int nbd)
2131 {
2132         struct eth_tx_start_bd *h_tx_bd = *tx_bd;
2133         struct eth_tx_bd *d_tx_bd;
2134         dma_addr_t mapping;
2135         int old_len = le16_to_cpu(h_tx_bd->nbytes);
2136
2137         /* first fix first BD */
2138         h_tx_bd->nbd = cpu_to_le16(nbd);
2139         h_tx_bd->nbytes = cpu_to_le16(hlen);
2140
2141         DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
2142            "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
2143            h_tx_bd->addr_lo, h_tx_bd->nbd);
2144
2145         /* now get a new data BD
2146          * (after the pbd) and fill it */
2147         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2148         d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
2149
2150         mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
2151                            le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
2152
2153         d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2154         d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
2155         d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
2156
2157         /* this marks the BD as one that has no individual mapping */
2158         tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
2159
2160         DP(NETIF_MSG_TX_QUEUED,
2161            "TSO split data size is %d (%x:%x)\n",
2162            d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
2163
2164         /* update tx_bd */
2165         *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
2166
2167         return bd_prod;
2168 }
2169
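/* Fix up a partial checksum: for fix > 0 subtract the contribution of the
 * 'fix' bytes preceding t_header, for fix < 0 add the bytes following it,
 * then return the folded result byte-swapped.
 */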
2170 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
2171 {
2172         if (fix > 0)
2173                 csum = (u16) ~csum_fold(csum_sub(csum,
2174                                 csum_partial(t_header - fix, fix, 0)));
2175
2176         else if (fix < 0)
2177                 csum = (u16) ~csum_fold(csum_add(csum,
2178                                 csum_partial(t_header, -fix, 0)));
2179
2180         return swab16(csum);
2181 }
2182
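/* Derive the XMIT_* flag mask for an skb: checksum offload (IPv4/IPv6,
 * TCP vs. non-TCP) and GSO (v4/v6), based on the skb metadata.
 */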
2183 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
2184 {
2185         u32 rc;
2186
2187         if (skb->ip_summed != CHECKSUM_PARTIAL)
2188                 rc = XMIT_PLAIN;
2189
2190         else {
2191                 if (vlan_get_protocol(skb) == htons(ETH_P_IPV6)) {
2192                         rc = XMIT_CSUM_V6;
2193                         if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
2194                                 rc |= XMIT_CSUM_TCP;
2195
2196                 } else {
2197                         rc = XMIT_CSUM_V4;
2198                         if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2199                                 rc |= XMIT_CSUM_TCP;
2200                 }
2201         }
2202
2203         if (skb_is_gso_v6(skb))
2204                 rc |= XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6;
2205         else if (skb_is_gso(skb))
2206                 rc |= XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP;
2207
2208         return rc;
2209 }
2210
2211 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
2212 /* check if the packet requires linearization (packet is too fragmented);
2213    no need to check fragmentation if page size > 8K (there will be no
2214    violation of FW restrictions) */
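/* The sliding-window check below verifies that every window of
 * (MAX_FETCH_BD - 3) consecutive BDs carries at least one full MSS of
 * payload; otherwise the skb must be linearized to satisfy the FW.
 */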
2215 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
2216                              u32 xmit_type)
2217 {
2218         int to_copy = 0;
2219         int hlen = 0;
2220         int first_bd_sz = 0;
2221
2222         /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
2223         if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
2224
2225                 if (xmit_type & XMIT_GSO) {
2226                         unsigned short lso_mss = skb_shinfo(skb)->gso_size;
2227                         /* Check if LSO packet needs to be copied:
2228                            3 = 1 (for headers BD) + 2 (for PBD and last BD) */
2229                         int wnd_size = MAX_FETCH_BD - 3;
2230                         /* Number of windows to check */
2231                         int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
2232                         int wnd_idx = 0;
2233                         int frag_idx = 0;
2234                         u32 wnd_sum = 0;
2235
2236                         /* Headers length */
2237                         hlen = (int)(skb_transport_header(skb) - skb->data) +
2238                                 tcp_hdrlen(skb);
2239
2240                         /* Amount of data (w/o headers) on linear part of SKB */
2241                         first_bd_sz = skb_headlen(skb) - hlen;
2242
2243                         wnd_sum  = first_bd_sz;
2244
2245                         /* Calculate the first sum - it's special */
2246                         for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
2247                                 wnd_sum +=
2248                                         skb_shinfo(skb)->frags[frag_idx].size;
2249
2250                         /* If there was data in the linear part of the skb - check it */
2251                         if (first_bd_sz > 0) {
2252                                 if (unlikely(wnd_sum < lso_mss)) {
2253                                         to_copy = 1;
2254                                         goto exit_lbl;
2255                                 }
2256
2257                                 wnd_sum -= first_bd_sz;
2258                         }
2259
2260                         /* Others are easier: run through the frag list and
2261                            check all windows */
2262                         for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
2263                                 wnd_sum +=
2264                           skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
2265
2266                                 if (unlikely(wnd_sum < lso_mss)) {
2267                                         to_copy = 1;
2268                                         break;
2269                                 }
2270                                 wnd_sum -=
2271                                         skb_shinfo(skb)->frags[wnd_idx].size;
2272                         }
2273                 } else {
2274                         /* in the non-LSO case a too-fragmented packet should
2275                            always be linearized */
2276                         to_copy = 1;
2277                 }
2278         }
2279
2280 exit_lbl:
2281         if (unlikely(to_copy))
2282                 DP(NETIF_MSG_TX_QUEUED,
2283                    "Linearization IS REQUIRED for %s packet. "
2284                    "num_frags %d  hlen %d  first_bd_sz %d\n",
2285                    (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
2286                    skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
2287
2288         return to_copy;
2289 }
2290 #endif
2291
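/* E2 GSO: encode the MSS into the parsing data; the IPV6_WITH_EXT_HDR flag
 * is set when the IPv6 next-header field indicates another IPv6 header.
 */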
2292 static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
2293                                         u32 xmit_type)
2294 {
2295         *parsing_data |= (skb_shinfo(skb)->gso_size <<
2296                               ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
2297                               ETH_TX_PARSE_BD_E2_LSO_MSS;
2298         if ((xmit_type & XMIT_GSO_V6) &&
2299             (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
2300                 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
2301 }
2302
2303 /**
2304  * bnx2x_set_pbd_gso - update PBD in GSO case.
2305  *
2306  * @skb:        packet skb
2307  * @pbd:        parse BD
2308  * @xmit_type:  xmit flags
2309  */
2310 static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
2311                                      struct eth_tx_parse_bd_e1x *pbd,
2312                                      u32 xmit_type)
2313 {
2314         pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2315         pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
2316         pbd->tcp_flags = pbd_tcp_flags(skb);
2317
2318         if (xmit_type & XMIT_GSO_V4) {
2319                 pbd->ip_id = swab16(ip_hdr(skb)->id);
2320                 pbd->tcp_pseudo_csum =
2321                         swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
2322                                                   ip_hdr(skb)->daddr,
2323                                                   0, IPPROTO_TCP, 0));
2324
2325         } else
2326                 pbd->tcp_pseudo_csum =
2327                         swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2328                                                 &ipv6_hdr(skb)->daddr,
2329                                                 0, IPPROTO_TCP, 0));
2330
2331         pbd->global_data |= ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN;
2332 }
2333
2334 /**
2335  * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
2336  *
2337  * @bp:                 driver handle
2338  * @skb:                packet skb
2339  * @parsing_data:       data to be updated
2340  * @xmit_type:          xmit flags
2341  *
2342  * 57712 related
2343  */
2344 static inline  u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
2345         u32 *parsing_data, u32 xmit_type)
2346 {
2347         *parsing_data |=
2348                         ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
2349                         ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) &
2350                         ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W;
2351
2352         if (xmit_type & XMIT_CSUM_TCP) {
2353                 *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
2354                         ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
2355                         ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
2356
2357                 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
2358         } else
2359                 /* We support checksum offload for TCP and UDP only.
2360                  * No need to pass the UDP header length - it's a constant.
2361                  */
2362                 return skb_transport_header(skb) +
2363                                 sizeof(struct udphdr) - skb->data;
2364 }
2365
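/* Set the checksum offload flags in the start BD: L4 checksum always, IP
 * checksum for IPv4 (the IPv6 flag otherwise), and IS_UDP when the L4
 * protocol is not TCP.
 */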
2366 static inline void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
2367         struct eth_tx_start_bd *tx_start_bd, u32 xmit_type)
2368 {
2369         tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
2370
2371         if (xmit_type & XMIT_CSUM_V4)
2372                 tx_start_bd->bd_flags.as_bitfield |=
2373                                         ETH_TX_BD_FLAGS_IP_CSUM;
2374         else
2375                 tx_start_bd->bd_flags.as_bitfield |=
2376                                         ETH_TX_BD_FLAGS_IPV6;
2377
2378         if (!(xmit_type & XMIT_CSUM_TCP))
2379                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
2380 }
2381
2382 /**
2383  * bnx2x_set_pbd_csum - update PBD with checksum and return header length
2384  *
2385  * @bp:         driver handle
2386  * @skb:        packet skb
2387  * @pbd:        parse BD to be updated
2388  * @xmit_type:  xmit flags
2389  */
2390 static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
2391         struct eth_tx_parse_bd_e1x *pbd,
2392         u32 xmit_type)
2393 {
2394         u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
2395
2396         /* for now NS flag is not used in Linux */
2397         pbd->global_data =
2398                 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
2399                          ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
2400
2401         pbd->ip_hlen_w = (skb_transport_header(skb) -
2402                         skb_network_header(skb)) >> 1;
2403
2404         hlen += pbd->ip_hlen_w;
2405
2406         /* We support checksum offload for TCP and UDP only */
2407         if (xmit_type & XMIT_CSUM_TCP)
2408                 hlen += tcp_hdrlen(skb) / 2;
2409         else
2410                 hlen += sizeof(struct udphdr) / 2;
2411
2412         pbd->total_hlen_w = cpu_to_le16(hlen);
2413         hlen = hlen*2;
2414
2415         if (xmit_type & XMIT_CSUM_TCP) {
2416                 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
2417
2418         } else {
2419                 s8 fix = SKB_CS_OFF(skb); /* signed! */
2420
2421                 DP(NETIF_MSG_TX_QUEUED,
2422                    "hlen %d  fix %d  csum before fix %x\n",
2423                    le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
2424
2425                 /* HW bug: fixup the CSUM */
2426                 pbd->tcp_pseudo_csum =
2427                         bnx2x_csum_fix(skb_transport_header(skb),
2428                                        SKB_CS(skb), fix);
2429
2430                 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
2431                    pbd->tcp_pseudo_csum);
2432         }
2433
2434         return hlen;
2435 }
2436
2437 /* called with netif_tx_lock
2438  * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
2439  * netif_wake_queue()
2440  */
2441 netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
2442 {
2443         struct bnx2x *bp = netdev_priv(dev);
2444         struct bnx2x_fastpath *fp;
2445         struct netdev_queue *txq;
2446         struct sw_tx_bd *tx_buf;
2447         struct eth_tx_start_bd *tx_start_bd, *first_bd;
2448         struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
2449         struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
2450         struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
2451         u32 pbd_e2_parsing_data = 0;
2452         u16 pkt_prod, bd_prod;
2453         int nbd, fp_index;
2454         dma_addr_t mapping;
2455         u32 xmit_type = bnx2x_xmit_type(bp, skb);
2456         int i;
2457         u8 hlen = 0;
2458         __le16 pkt_size = 0;
2459         struct ethhdr *eth;
2460         u8 mac_type = UNICAST_ADDRESS;
2461
2462 #ifdef BNX2X_STOP_ON_ERROR
2463         if (unlikely(bp->panic))
2464                 return NETDEV_TX_BUSY;
2465 #endif
2466
2467         fp_index = skb_get_queue_mapping(skb);
2468         txq = netdev_get_tx_queue(dev, fp_index);
2469
2470         fp = &bp->fp[fp_index];
2471
2472         if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
2473                 fp->eth_q_stats.driver_xoff++;
2474                 netif_tx_stop_queue(txq);
2475                 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
2476                 return NETDEV_TX_BUSY;
2477         }
2478
2479         DP(NETIF_MSG_TX_QUEUED, "queue[%d]: SKB: summed %x  protocol %x  "
2480                                 "protocol(%x,%x) gso type %x  xmit_type %x\n",
2481            fp_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
2482            ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
2483
2484         eth = (struct ethhdr *)skb->data;
2485
2486         /* set flag according to packet type (UNICAST_ADDRESS is default) */
2487         if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
2488                 if (is_broadcast_ether_addr(eth->h_dest))
2489                         mac_type = BROADCAST_ADDRESS;
2490                 else
2491                         mac_type = MULTICAST_ADDRESS;
2492         }
2493
2494 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
2495         /* First, check if we need to linearize the skb (due to FW
2496            restrictions). No need to check fragmentation if page size > 8K
2497            (there will be no violation of FW restrictions) */
2498         if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
2499                 /* Statistics of linearization */
2500                 bp->lin_cnt++;
2501                 if (skb_linearize(skb) != 0) {
2502                         DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
2503                            "silently dropping this SKB\n");
2504                         dev_kfree_skb_any(skb);
2505                         return NETDEV_TX_OK;
2506                 }
2507         }
2508 #endif
2509         /* Map skb linear data for DMA */
2510         mapping = dma_map_single(&bp->pdev->dev, skb->data,
2511                                  skb_headlen(skb), DMA_TO_DEVICE);
2512         if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
2513                 DP(NETIF_MSG_TX_QUEUED, "SKB mapping failed - "
2514                    "silently dropping this SKB\n");
2515                 dev_kfree_skb_any(skb);
2516                 return NETDEV_TX_OK;
2517         }
2518         /*
2519         Please read carefully. First we use one BD which we mark as start,
2520         then we have a parsing info BD (used for TSO or xsum),
2521         and only then we have the rest of the TSO BDs.
2522         (don't forget to mark the last one as last,
2523         and to unmap only AFTER you write to the BD ...)
2524         And above all, all pbd sizes are in words - NOT DWORDS!
2525         */
2526
2527         /* get current pkt produced now - advance it just before sending packet
2528          * since mapping of pages may fail and cause packet to be dropped
2529          */
2530         pkt_prod = fp->tx_pkt_prod;
2531         bd_prod = TX_BD(fp->tx_bd_prod);
2532
2533         /* get a tx_buf and first BD
2534          * tx_start_bd may be changed during SPLIT,
2535          * but first_bd will always stay first
2536          */
2537         tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
2538         tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
2539         first_bd = tx_start_bd;
2540
2541         tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
2542         SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_ETH_ADDR_TYPE,
2543                  mac_type);
2544
2545         /* header nbd */
2546         SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1);
2547
2548         /* remember the first BD of the packet */
2549         tx_buf->first_bd = fp->tx_bd_prod;
2550         tx_buf->skb = skb;
2551         tx_buf->flags = 0;
2552
2553         DP(NETIF_MSG_TX_QUEUED,
2554            "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
2555            pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
2556
2557         if (vlan_tx_tag_present(skb)) {
2558                 tx_start_bd->vlan_or_ethertype =
2559                     cpu_to_le16(vlan_tx_tag_get(skb));
2560                 tx_start_bd->bd_flags.as_bitfield |=
2561                     (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
2562         } else
2563                 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
2564
2565         /* turn on parsing and get a BD */
2566         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2567
2568         if (xmit_type & XMIT_CSUM)
2569                 bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
2570
2571         if (!CHIP_IS_E1x(bp)) {
2572                 pbd_e2 = &fp->tx_desc_ring[bd_prod].parse_bd_e2;
2573                 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
2574                 /* Set PBD in checksum offload case */
2575                 if (xmit_type & XMIT_CSUM)
2576                         hlen = bnx2x_set_pbd_csum_e2(bp, skb,
2577                                                      &pbd_e2_parsing_data,
2578                                                      xmit_type);
2579                 if (IS_MF_SI(bp)) {
2580                         /*
2581                          * fill in the MAC addresses in the PBD - for local
2582                          * switching
2583                          */
2584                         bnx2x_set_fw_mac_addr(&pbd_e2->src_mac_addr_hi,
2585                                               &pbd_e2->src_mac_addr_mid,
2586                                               &pbd_e2->src_mac_addr_lo,
2587                                               eth->h_source);
2588                         bnx2x_set_fw_mac_addr(&pbd_e2->dst_mac_addr_hi,
2589                                               &pbd_e2->dst_mac_addr_mid,
2590                                               &pbd_e2->dst_mac_addr_lo,
2591                                               eth->h_dest);
2592                 }
2593         } else {
2594                 pbd_e1x = &fp->tx_desc_ring[bd_prod].parse_bd_e1x;
2595                 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
2596                 /* Set PBD in checksum offload case */
2597                 if (xmit_type & XMIT_CSUM)
2598                         hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
2599
2600         }
2601
2602         /* Setup the data pointer of the first BD of the packet */
2603         tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2604         tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
2605         nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
2606         tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
2607         pkt_size = tx_start_bd->nbytes;
2608
2609         DP(NETIF_MSG_TX_QUEUED, "first bd @%p  addr (%x:%x)  nbd %d"
2610            "  nbytes %d  flags %x  vlan %x\n",
2611            tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
2612            le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
2613            tx_start_bd->bd_flags.as_bitfield,
2614            le16_to_cpu(tx_start_bd->vlan_or_ethertype));
2615
2616         if (xmit_type & XMIT_GSO) {
2617
2618                 DP(NETIF_MSG_TX_QUEUED,
2619                    "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
2620                    skb->len, hlen, skb_headlen(skb),
2621                    skb_shinfo(skb)->gso_size);
2622
2623                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
2624
2625                 if (unlikely(skb_headlen(skb) > hlen))
2626                         bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
2627                                                  hlen, bd_prod, ++nbd);
2628                 if (!CHIP_IS_E1x(bp))
2629                         bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
2630                                              xmit_type);
2631                 else
2632                         bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
2633         }
2634
2635         /* Set the PBD's parsing_data field if not zero
2636          * (for the chips newer than 57711).
2637          */
2638         if (pbd_e2_parsing_data)
2639                 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
2640
2641         tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
2642
2643         /* Handle fragmented skb */
2644         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2645                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2646
2647                 mapping = dma_map_page(&bp->pdev->dev, frag->page,
2648                                        frag->page_offset, frag->size,
2649                                        DMA_TO_DEVICE);
2650                 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
2651
2652                         DP(NETIF_MSG_TX_QUEUED, "Unable to map page - "
2653                                                 "dropping packet...\n");
2654
2655                         /* we need to unmap all buffers already mapped
2656                          * for this SKB;
2657                          * first_bd->nbd needs to be properly updated
2658                          * before the call to bnx2x_free_tx_pkt
2659                          */
2660                         first_bd->nbd = cpu_to_le16(nbd);
2661                         bnx2x_free_tx_pkt(bp, fp, TX_BD(fp->tx_pkt_prod));
2662                         return NETDEV_TX_OK;
2663                 }
2664
2665                 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2666                 tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
2667                 if (total_pkt_bd == NULL)
2668                         total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
2669
2670                 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2671                 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
2672                 tx_data_bd->nbytes = cpu_to_le16(frag->size);
2673                 le16_add_cpu(&pkt_size, frag->size);
2674                 nbd++;
2675
2676                 DP(NETIF_MSG_TX_QUEUED,
2677                    "frag %d  bd @%p  addr (%x:%x)  nbytes %d\n",
2678                    i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
2679                    le16_to_cpu(tx_data_bd->nbytes));
2680         }
2681
2682         DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
2683
2684         /* update with actual num BDs */
2685         first_bd->nbd = cpu_to_le16(nbd);
2686
2687         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2688
2689         /* now send a tx doorbell, counting the next BD
2690          * if the packet contains or ends with it
2691          */
2692         if (TX_BD_POFF(bd_prod) < nbd)
2693                 nbd++;
2694
2695         /* total_pkt_bytes should be set on the first data BD if
2696          * it's not an LSO packet and there is more than one
2697          * data BD. In this case pkt_size is limited by an MTU value.
2698          * However we prefer to set it for an LSO packet (while we don't
2699          * have to) in order to save some CPU cycles in the non-LSO
2700          * case, which we care much more about.
2701          */
2702         if (total_pkt_bd != NULL)
2703                 total_pkt_bd->total_pkt_bytes = pkt_size;
2704
2705         if (pbd_e1x)
2706                 DP(NETIF_MSG_TX_QUEUED,
2707                    "PBD (E1X) @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u"
2708                    "  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
2709                    pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
2710                    pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
2711                    pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
2712                     le16_to_cpu(pbd_e1x->total_hlen_w));
2713         if (pbd_e2)
2714                 DP(NETIF_MSG_TX_QUEUED,
2715                    "PBD (E2) @%p  dst %x %x %x src %x %x %x parsing_data %x\n",
2716                    pbd_e2, pbd_e2->dst_mac_addr_hi, pbd_e2->dst_mac_addr_mid,
2717                    pbd_e2->dst_mac_addr_lo, pbd_e2->src_mac_addr_hi,
2718                    pbd_e2->src_mac_addr_mid, pbd_e2->src_mac_addr_lo,
2719                    pbd_e2->parsing_data);
2720         DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);
2721
2722         fp->tx_pkt_prod++;
2723         /*
2724          * Make sure that the BD data is updated before updating the producer
2725          * since FW might read the BD right after the producer is updated.
2726          * This is only applicable for weak-ordered memory model archs such
2727          * as IA-64. The following barrier is also mandatory since the FW
2728          * assumes packets must have BDs.
2729          */
2730         wmb();
2731
2732         fp->tx_db.data.prod += nbd;
2733         barrier();
2734
2735         DOORBELL(bp, fp->cid, fp->tx_db.raw);
2736
2737         mmiowb();
2738
2739         fp->tx_bd_prod += nbd;
2740
2741         if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
2742                 netif_tx_stop_queue(txq);
2743
2744                 /* the paired memory barrier is in bnx2x_tx_int(); we have to keep
2745                  * the ordering of set_bit() in netif_tx_stop_queue() and the read of
2746                  * fp->tx_bd_cons */
2747                 smp_mb();
2748
2749                 fp->eth_q_stats.driver_xoff++;
2750                 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
2751                         netif_tx_wake_queue(txq);
2752         }
2753         fp->tx_pkt++;
2754
2755         return NETDEV_TX_OK;
2756 }
2757
2758 /* called with rtnl_lock */
2759 int bnx2x_change_mac_addr(struct net_device *dev, void *p)
2760 {
2761         struct sockaddr *addr = p;
2762         struct bnx2x *bp = netdev_priv(dev);
2763         int rc = 0;
2764
2765         if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
2766                 return -EINVAL;
2767
2768         if (netif_running(dev))  {
2769                 rc = bnx2x_set_eth_mac(bp, false);
2770                 if (rc)
2771                         return rc;
2772         }
2773
2774         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2775
2776         if (netif_running(dev))
2777                 rc = bnx2x_set_eth_mac(bp, true);
2778
2779         return rc;
2780 }
2781
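/* Free all per-fastpath memory of one queue: the status block (the FCoE
 * ring only borrows the default SB, so nothing is freed for it), the Rx
 * buffer/descriptor/completion and SGE rings, and the Tx rings.
 */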
2782 static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
2783 {
2784         union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
2785         struct bnx2x_fastpath *fp = &bp->fp[fp_index];
2786
2787         /* Common */
2788 #ifdef BCM_CNIC
2789         if (IS_FCOE_IDX(fp_index)) {
2790                 memset(sb, 0, sizeof(union host_hc_status_block));
2791                 fp->status_blk_mapping = 0;
2792
2793         } else {
2794 #endif
2795                 /* status blocks */
2796                 if (!CHIP_IS_E1x(bp))
2797                         BNX2X_PCI_FREE(sb->e2_sb,
2798                                        bnx2x_fp(bp, fp_index,
2799                                                 status_blk_mapping),
2800                                        sizeof(struct host_hc_status_block_e2));
2801                 else
2802                         BNX2X_PCI_FREE(sb->e1x_sb,
2803                                        bnx2x_fp(bp, fp_index,
2804                                                 status_blk_mapping),
2805                                        sizeof(struct host_hc_status_block_e1x));
2806 #ifdef BCM_CNIC
2807         }
2808 #endif
2809         /* Rx */
2810         if (!skip_rx_queue(bp, fp_index)) {
2811                 bnx2x_free_rx_bds(fp);
2812
2813                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
2814                 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
2815                 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
2816                                bnx2x_fp(bp, fp_index, rx_desc_mapping),
2817                                sizeof(struct eth_rx_bd) * NUM_RX_BD);
2818
2819                 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
2820                                bnx2x_fp(bp, fp_index, rx_comp_mapping),
2821                                sizeof(struct eth_fast_path_rx_cqe) *
2822                                NUM_RCQ_BD);
2823
2824                 /* SGE ring */
2825                 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
2826                 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
2827                                bnx2x_fp(bp, fp_index, rx_sge_mapping),
2828                                BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
2829         }
2830
2831         /* Tx */
2832         if (!skip_tx_queue(bp, fp_index)) {
2833                 /* fastpath tx rings: tx_buf tx_desc */
2834                 BNX2X_FREE(bnx2x_fp(bp, fp_index, tx_buf_ring));
2835                 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, tx_desc_ring),
2836                                bnx2x_fp(bp, fp_index, tx_desc_mapping),
2837                                sizeof(union eth_tx_bd_types) * NUM_TX_BD);
2838         }
2839         /* end of fastpath */
2840 }
2841
2842 void bnx2x_free_fp_mem(struct bnx2x *bp)
2843 {
2844         int i;
2845         for_each_queue(bp, i)
2846                 bnx2x_free_fp_mem_at(bp, i);
2847 }
2848
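     /**
      * set_sb_shortcuts - cache pointers into the fastpath status block.
      *
      * @bp:         driver handle
      * @index:      fastpath index
      *
      * Points sb_index_values and sb_running_index at the corresponding
      * fields of the E2 or E1x status block layout, depending on the chip.
      */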
2849 static inline void set_sb_shortcuts(struct bnx2x *bp, int index)
2850 {
2851         union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
2852         if (!CHIP_IS_E1x(bp)) {
2853                 bnx2x_fp(bp, index, sb_index_values) =
2854                         (__le16 *)status_blk.e2_sb->sb.index_values;
2855                 bnx2x_fp(bp, index, sb_running_index) =
2856                         (__le16 *)status_blk.e2_sb->sb.running_index;
2857         } else {
2858                 bnx2x_fp(bp, index, sb_index_values) =
2859                         (__le16 *)status_blk.e1x_sb->sb.index_values;
2860                 bnx2x_fp(bp, index, sb_running_index) =
2861                         (__le16 *)status_blk.e1x_sb->sb.running_index;
2862         }
2863 }
2864
2865 static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
2866 {
2867         union host_hc_status_block *sb;
2868         struct bnx2x_fastpath *fp = &bp->fp[index];
2869         int ring_size = 0;
2870
2871         /* if rx_ring_size specified - use it */
2872         int rx_ring_size = bp->rx_ring_size ? bp->rx_ring_size :
2873                            MAX_RX_AVAIL/bp->num_queues;
2874
2875         /* allocate at least number of buffers required by FW */
2876         rx_ring_size = max_t(int, fp->disable_tpa ? MIN_RX_SIZE_NONTPA :
2877                                                     MIN_RX_SIZE_TPA,
2878                                   rx_ring_size);
2879
2880         bnx2x_fp(bp, index, bp) = bp;
2881         bnx2x_fp(bp, index, index) = index;
2882
2883         /* Common */
2884         sb = &bnx2x_fp(bp, index, status_blk);
2885 #ifdef BCM_CNIC
2886         if (!IS_FCOE_IDX(index)) {
2887 #endif
2888                 /* status blocks */
2889                 if (!CHIP_IS_E1x(bp))
2890                         BNX2X_PCI_ALLOC(sb->e2_sb,
2891                                 &bnx2x_fp(bp, index, status_blk_mapping),
2892                                 sizeof(struct host_hc_status_block_e2));
2893                 else
2894                         BNX2X_PCI_ALLOC(sb->e1x_sb,
2895                                 &bnx2x_fp(bp, index, status_blk_mapping),
2896                                 sizeof(struct host_hc_status_block_e1x));
2897 #ifdef BCM_CNIC
2898         }
2899 #endif
2900
2901         /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
2902          * set shortcuts for it.
2903          */
2904         if (!IS_FCOE_IDX(index))
2905                 set_sb_shortcuts(bp, index);
2906
2907         /* Tx */
2908         if (!skip_tx_queue(bp, index)) {
2909                 /* fastpath tx rings: tx_buf tx_desc */
2910                 BNX2X_ALLOC(bnx2x_fp(bp, index, tx_buf_ring),
2911                                 sizeof(struct sw_tx_bd) * NUM_TX_BD);
2912                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, tx_desc_ring),
2913                                 &bnx2x_fp(bp, index, tx_desc_mapping),
2914                                 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
2915         }
2916
2917         /* Rx */
2918         if (!skip_rx_queue(bp, index)) {
2919                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
2920                 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_buf_ring),
2921                                 sizeof(struct sw_rx_bd) * NUM_RX_BD);
2922                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_desc_ring),
2923                                 &bnx2x_fp(bp, index, rx_desc_mapping),
2924                                 sizeof(struct eth_rx_bd) * NUM_RX_BD);
2925
2926                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_comp_ring),
2927                                 &bnx2x_fp(bp, index, rx_comp_mapping),
2928                                 sizeof(struct eth_fast_path_rx_cqe) *
2929                                 NUM_RCQ_BD);
2930
2931                 /* SGE ring */
2932                 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring),
2933                                 sizeof(struct sw_rx_page) * NUM_RX_SGE);
2934                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_sge_ring),
2935                                 &bnx2x_fp(bp, index, rx_sge_mapping),
2936                                 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
2937                 /* RX BD ring */
2938                 bnx2x_set_next_page_rx_bd(fp);
2939
2940                 /* CQ ring */
2941                 bnx2x_set_next_page_rx_cq(fp);
2942
2943                 /* BDs */
2944                 ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
2945                 if (ring_size < rx_ring_size)
2946                         goto alloc_mem_err;
2947         }
2948
2949         return 0;
2950
2951 /* handles low memory cases */
2952 alloc_mem_err:
2953         BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
2954                                                 index, ring_size);
2955         /* FW will drop all packets if the queue is not big enough;
2956          * in that case we disable the queue.
2957          * The minimum size differs for TPA and non-TPA queues.
2958          */
2959         if (ring_size < (fp->disable_tpa ?
2960                                 MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
2961                 /* release memory allocated for this queue */
2962                 bnx2x_free_fp_mem_at(bp, index);
2963                 return -ENOMEM;
2964         }
2965         return 0;
2966 }
2967
2968 int bnx2x_alloc_fp_mem(struct bnx2x *bp)
2969 {
2970         int i;
2971
2972         /*
2973          * 1. Allocate FP for the leading queue - fatal if error
2974          * 2. {CNIC} Allocate the FCoE FP - fatal if error
2975          * 3. Allocate the RSS queues - reduce the number of queues if error
2976          */
2977
2978         /* leading */
2979         if (bnx2x_alloc_fp_mem_at(bp, 0))
2980                 return -ENOMEM;
2981 #ifdef BCM_CNIC
2982         if (!NO_FCOE(bp))
2983                 /* FCoE */
2984                 if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX))
2985                         /* we will fail the load process instead of
2986                          * marking NO_FCOE_FLAG
2987                          */
2988                         return -ENOMEM;
2989 #endif
2990         /* RSS */
2991         for_each_nondefault_eth_queue(bp, i)
2992                 if (bnx2x_alloc_fp_mem_at(bp, i))
2993                         break;
2994
2995         /* handle memory failures */
2996         if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
2997                 int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
2998
2999                 WARN_ON(delta < 0);
3000 #ifdef BCM_CNIC
3001                 /*
3002                  * move the non-eth FPs next to the last eth FP;
3003                  * this must be done in the following order:
3004                  * FCOE_IDX < FWD_IDX < OOO_IDX
3005                  */
3006
3007                 /* move FCoE fp */
3008                 bnx2x_move_fp(bp, FCOE_IDX, FCOE_IDX - delta);
3009 #endif
3010                 bp->num_queues -= delta;
3011                 BNX2X_ERR("Adjusted num of queues from %d to %d\n",
3012                           bp->num_queues + delta, bp->num_queues);
3013         }
3014
3015         return 0;
3016 }
3017
3018 void bnx2x_free_mem_bp(struct bnx2x *bp)
3019 {
3020         kfree(bp->fp);
3021         kfree(bp->msix_table);
3022         kfree(bp->ilt);
3023 }
3024
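     /* Allocate the per-function bookkeeping structures: the fastpath array,
      * the MSI-X entry table (one entry per fastpath SB plus one extra) and
      * the ILT. Everything is released via bnx2x_free_mem_bp() on failure.
      */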
3025 int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
3026 {
3027         struct bnx2x_fastpath *fp;
3028         struct msix_entry *tbl;
3029         struct bnx2x_ilt *ilt;
3030
3031         /* fp array */
3032         fp = kzalloc(L2_FP_COUNT(bp->l2_cid_count)*sizeof(*fp), GFP_KERNEL);
3033         if (!fp)
3034                 goto alloc_err;
3035         bp->fp = fp;
3036
3037         /* msix table */
3038         tbl = kzalloc((FP_SB_COUNT(bp->l2_cid_count) + 1) * sizeof(*tbl),
3039                                   GFP_KERNEL);
3040         if (!tbl)
3041                 goto alloc_err;
3042         bp->msix_table = tbl;
3043
3044         /* ilt */
3045         ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
3046         if (!ilt)
3047                 goto alloc_err;
3048         bp->ilt = ilt;
3049
3050         return 0;
3051 alloc_err:
3052         bnx2x_free_mem_bp(bp);
3053         return -ENOMEM;
3054
3055 }
3056
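     /* Unload and reload the NIC so that configuration changes take effect.
      * A no-op when the interface is not running.
      */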
3057 int bnx2x_reload_if_running(struct net_device *dev)
3058 {
3059         struct bnx2x *bp = netdev_priv(dev);
3060
3061         if (unlikely(!netif_running(dev)))
3062                 return 0;
3063
3064         bnx2x_nic_unload(bp, UNLOAD_NORMAL);
3065         return bnx2x_nic_load(bp, LOAD_NORMAL);
3066 }
3067
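     /* Return the index of the currently active PHY: INT_PHY for single-PHY
      * configurations, otherwise EXT_PHY1/EXT_PHY2 based on the link status
      * (when the link is up) or on the configured PHY selection.
      */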
3068 int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
3069 {
3070         u32 sel_phy_idx = 0;
3071         if (bp->link_params.num_phys <= 1)
3072                 return INT_PHY;
3073
3074         if (bp->link_vars.link_up) {
3075                 sel_phy_idx = EXT_PHY1;
3076                 /* In case the link is SERDES, check if EXT_PHY2 is the one */
3077                 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
3078                     (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
3079                         sel_phy_idx = EXT_PHY2;
3080         } else {
3081
3082                 switch (bnx2x_phy_selection(&bp->link_params)) {
3083                 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
3084                 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
3085                 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
3086                        sel_phy_idx = EXT_PHY1;
3087                        break;
3088                 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
3089                 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
3090                        sel_phy_idx = EXT_PHY2;
3091                        break;
3092                 }
3093         }
3094
3095         return sel_phy_idx;
3096
3097 }
3098 int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
3099 {
3100         u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
3101         /*
3102          * The selected active PHY is always after swapping (in case PHY
3103          * swapping is enabled), so when swapping is enabled we need to
3104          * reverse the configuration.
3105          */
3106
3107         if (bp->link_params.multi_phy_config &
3108             PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
3109                 if (sel_phy_idx == EXT_PHY1)
3110                         sel_phy_idx = EXT_PHY2;
3111                 else if (sel_phy_idx == EXT_PHY2)
3112                         sel_phy_idx = EXT_PHY1;
3113         }
3114         return LINK_CONFIG_IDX(sel_phy_idx);
3115 }
3116
3117 /* called with rtnl_lock */
3118 int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
3119 {
3120         struct bnx2x *bp = netdev_priv(dev);
3121
3122         if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
3123                 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
3124                 return -EAGAIN;
3125         }
3126
3127         if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
3128             ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
3129                 return -EINVAL;
3130
3131         /* This does not race with packet allocation
3132          * because the actual alloc size is
3133          * only updated as part of load
3134          */
3135         dev->mtu = new_mtu;
3136
3137         return bnx2x_reload_if_running(dev);
3138 }
3139
3140 u32 bnx2x_fix_features(struct net_device *dev, u32 features)
3141 {
3142         struct bnx2x *bp = netdev_priv(dev);
3143
3144         /* TPA requires Rx CSUM offloading */
3145         if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa)
3146                 features &= ~NETIF_F_LRO;
3147
3148         return features;
3149 }
3150
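     /* Apply netdev feature changes: NETIF_F_LRO maps to TPA_ENABLE_FLAG and
      * NETIF_F_LOOPBACK selects BMAC loopback mode. The NIC is reloaded if
      * anything changed, unless a parity error recovery is still in progress.
      */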
3151 int bnx2x_set_features(struct net_device *dev, u32 features)
3152 {
3153         struct bnx2x *bp = netdev_priv(dev);
3154         u32 flags = bp->flags;
3155         bool bnx2x_reload = false;
3156
3157         if (features & NETIF_F_LRO)
3158                 flags |= TPA_ENABLE_FLAG;
3159         else
3160                 flags &= ~TPA_ENABLE_FLAG;
3161
3162         if (features & NETIF_F_LOOPBACK) {
3163                 if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
3164                         bp->link_params.loopback_mode = LOOPBACK_BMAC;
3165                         bnx2x_reload = true;
3166                 }
3167         } else {
3168                 if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
3169                         bp->link_params.loopback_mode = LOOPBACK_NONE;
3170                         bnx2x_reload = true;
3171                 }
3172         }
3173
3174         if (flags ^ bp->flags) {
3175                 bp->flags = flags;
3176                 bnx2x_reload = true;
3177         }
3178
3179         if (bnx2x_reload) {
3180                 if (bp->recovery_state == BNX2X_RECOVERY_DONE)
3181                         return bnx2x_reload_if_running(dev);
3182                 /* else: bnx2x_nic_load() will be called at end of recovery */
3183         }
3184
3185         return 0;
3186 }
3187
3188 void bnx2x_tx_timeout(struct net_device *dev)
3189 {
3190         struct bnx2x *bp = netdev_priv(dev);
3191
3192 #ifdef BNX2X_STOP_ON_ERROR
3193         if (!bp->panic)
3194                 bnx2x_panic();
3195 #endif
3196         /* This allows the netif to be shut down gracefully before resetting */
3197         schedule_delayed_work(&bp->reset_task, 0);
3198 }
3199
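     /* PCI PM suspend hook: detaches the netdev, unloads the NIC and moves
      * the device to the requested power state. Only the PCI state is saved
      * when the interface is down.
      */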
3200 int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
3201 {
3202         struct net_device *dev = pci_get_drvdata(pdev);
3203         struct bnx2x *bp;
3204
3205         if (!dev) {
3206                 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
3207                 return -ENODEV;
3208         }
3209         bp = netdev_priv(dev);
3210
3211         rtnl_lock();
3212
3213         pci_save_state(pdev);
3214
3215         if (!netif_running(dev)) {
3216                 rtnl_unlock();
3217                 return 0;
3218         }
3219
3220         netif_device_detach(dev);
3221
3222         bnx2x_nic_unload(bp, UNLOAD_CLOSE);
3223
3224         bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
3225
3226         rtnl_unlock();
3227
3228         return 0;
3229 }
3230
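     /* PCI PM resume hook: restores the PCI state, powers the device back up,
      * reattaches the netdev and reloads the NIC. Refused while a parity
      * error recovery is in progress.
      */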
3231 int bnx2x_resume(struct pci_dev *pdev)
3232 {
3233         struct net_device *dev = pci_get_drvdata(pdev);
3234         struct bnx2x *bp;
3235         int rc;
3236
3237         if (!dev) {
3238                 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
3239                 return -ENODEV;
3240         }
3241         bp = netdev_priv(dev);
3242
3243         if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
3244                 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
3245                 return -EAGAIN;
3246         }
3247
3248         rtnl_lock();
3249
3250         pci_restore_state(pdev);
3251
3252         if (!netif_running(dev)) {
3253                 rtnl_unlock();
3254                 return 0;
3255         }
3256
3257         bnx2x_set_power_state(bp, PCI_D0);
3258         netif_device_attach(dev);
3259
3260         /* Since the chip was reset, clear the FW sequence number */
3261         bp->fw_seq = 0;
3262         rc = bnx2x_nic_load(bp, LOAD_OPEN);
3263
3264         rtnl_unlock();
3265
3266         return rc;
3267 }
3268
3269
3270 void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
3271                               u32 cid)
3272 {
3273         /* ustorm cxt validation */
3274         cxt->ustorm_ag_context.cdu_usage =
3275                 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
3276                         CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
3277         /* xcontext validation */
3278         cxt->xstorm_ag_context.cdu_reserved =
3279                 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
3280                         CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
3281 }
3282
3283 static inline void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
3284                                              u8 fw_sb_id, u8 sb_index,
3285                                              u8 ticks)
3286 {
3287
3288         u32 addr = BAR_CSTRORM_INTMEM +
3289                    CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
3290         REG_WR8(bp, addr, ticks);
3291         DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d ticks %d\n",
3292                           port, fw_sb_id, sb_index, ticks);
3293 }
3294
3295 static inline void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
3296                                              u16 fw_sb_id, u8 sb_index,
3297                                              u8 disable)
3298 {
3299         u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
3300         u32 addr = BAR_CSTRORM_INTMEM +
3301                    CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
3302         u16 flags = REG_RD16(bp, addr);
3303         /* clear and set */
3304         flags &= ~HC_INDEX_DATA_HC_ENABLED;
3305         flags |= enable_flag;
3306         REG_WR16(bp, addr, flags);
3307         DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d disable %d\n",
3308                           port, fw_sb_id, sb_index, disable);
3309 }
3310
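     /* Program the HC timeout of a single SB index (in units of BNX2X_BTR
      * usec) and enable or disable coalescing on it; a zero interval also
      * disables coalescing.
      */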
3311 void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
3312                                     u8 sb_index, u8 disable, u16 usec)
3313 {
3314         int port = BP_PORT(bp);
3315         u8 ticks = usec / BNX2X_BTR;
3316
3317         storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
3318
3319         disable = disable ? 1 : (usec ? 0 : 1);
3320         storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
3321 }