pandora-kernel.git: drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
1 /* bnx2x_cmn.c: Broadcom Everest network driver.
2  *
3  * Copyright (c) 2007-2012 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10  * Written by: Eliezer Tamir
11  * Based on code from Michael Chan's bnx2 driver
12  * UDP CSUM errata workaround by Arik Gendelman
13  * Slowpath and fastpath rework by Vladislav Zolotarov
14  * Statistics and Link management by Yitchak Gertner
15  *
16  */
17
18 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
20 #include <linux/etherdevice.h>
21 #include <linux/if_vlan.h>
22 #include <linux/interrupt.h>
23 #include <linux/ip.h>
24 #include <net/ipv6.h>
25 #include <net/ip6_checksum.h>
26 #include <linux/prefetch.h>
27 #include "bnx2x_cmn.h"
28 #include "bnx2x_init.h"
29 #include "bnx2x_sp.h"
30
31
32
33 /**
34  * bnx2x_move_fp - move content of the fastpath structure.
35  *
36  * @bp:         driver handle
37  * @from:       source FP index
38  * @to:         destination FP index
39  *
40  * Makes sure the contents of bp->fp[to].napi are kept
41  * intact. This is done by first copying the napi struct from
42  * the target to the source, and then memcpying the entire
43  * source onto the target.
44  */
45 static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
46 {
47         struct bnx2x_fastpath *from_fp = &bp->fp[from];
48         struct bnx2x_fastpath *to_fp = &bp->fp[to];
49
50         /* Copy the NAPI object as it has been already initialized */
51         from_fp->napi = to_fp->napi;
52
53         /* Move bnx2x_fastpath contents */
54         memcpy(to_fp, from_fp, sizeof(*to_fp));
55         to_fp->index = to;
56 }
57
58 int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
59
60 /* free skb in the packet ring at pos idx
61  * return idx of last bd freed
62  */
63 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
64                              u16 idx, unsigned int *pkts_compl,
65                              unsigned int *bytes_compl)
66 {
67         struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
68         struct eth_tx_start_bd *tx_start_bd;
69         struct eth_tx_bd *tx_data_bd;
70         struct sk_buff *skb = tx_buf->skb;
71         u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
72         int nbd;
73
74         /* prefetch skb end pointer to speed up dev_kfree_skb() */
75         prefetch(&skb->end);
76
77         DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d  buff @(%p)->skb %p\n",
78            txdata->txq_index, idx, tx_buf, skb);
79
80         /* unmap first bd */
81         tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
82         dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
83                          BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);
84
85
86         nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
87 #ifdef BNX2X_STOP_ON_ERROR
88         if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
89                 BNX2X_ERR("BAD nbd!\n");
90                 bnx2x_panic();
91         }
92 #endif
93         new_cons = nbd + tx_buf->first_bd;
94
95         /* Get the next bd */
96         bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
97
98         /* Skip a parse bd... */
99         --nbd;
100         bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
101
102         /* ...and the TSO split header bd since they have no mapping */
103         if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
104                 --nbd;
105                 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
106         }
107
108         /* now free frags */
109         while (nbd > 0) {
110
111                 tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
112                 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
113                                BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
114                 if (--nbd)
115                         bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
116         }
117
118         /* release skb */
119         WARN_ON(!skb);
120         if (likely(skb)) {
121                 (*pkts_compl)++;
122                 (*bytes_compl) += skb->len;
123         }
124
125         dev_kfree_skb_any(skb);
126         tx_buf->first_bd = 0;
127         tx_buf->skb = NULL;
128
129         return new_cons;
130 }
131
132 int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
133 {
134         struct netdev_queue *txq;
135         u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
136         unsigned int pkts_compl = 0, bytes_compl = 0;
137
138 #ifdef BNX2X_STOP_ON_ERROR
139         if (unlikely(bp->panic))
140                 return -1;
141 #endif
142
143         txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
144         hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
145         sw_cons = txdata->tx_pkt_cons;
146
147         while (sw_cons != hw_cons) {
148                 u16 pkt_cons;
149
150                 pkt_cons = TX_BD(sw_cons);
151
152                 DP(NETIF_MSG_TX_DONE,
153                    "queue[%d]: hw_cons %u  sw_cons %u  pkt_cons %u\n",
154                    txdata->txq_index, hw_cons, sw_cons, pkt_cons);
155
156                 bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
157                     &pkts_compl, &bytes_compl);
158
159                 sw_cons++;
160         }
161
162         netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
163
164         txdata->tx_pkt_cons = sw_cons;
165         txdata->tx_bd_cons = bd_cons;
166
167         /* Need to make the tx_bd_cons update visible to start_xmit()
168          * before checking for netif_tx_queue_stopped().  Without the
169          * memory barrier, there is a small possibility that
170          * start_xmit() will miss it and cause the queue to be stopped
171          * forever.
172          * On the other hand we need an rmb() here to ensure the proper
173          * ordering of bit testing in the following
174          * netif_tx_queue_stopped(txq) call.
175          */
176         smp_mb();
177
178         if (unlikely(netif_tx_queue_stopped(txq))) {
179                 /* Taking tx_lock() is needed to prevent re-enabling the queue
180                  * while it's empty. This could have happened if rx_action() gets
181                  * suspended in bnx2x_tx_int() after the condition before
182                  * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
183                  *
184                  * stops the queue->sees fresh tx_bd_cons->releases the queue->
185                  * sends some packets consuming the whole queue again->
186                  * stops the queue
187                  */
188
189                 __netif_tx_lock(txq, smp_processor_id());
190
191                 if ((netif_tx_queue_stopped(txq)) &&
192                     (bp->state == BNX2X_STATE_OPEN) &&
193                     (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 3))
194                         netif_tx_wake_queue(txq);
195
196                 __netif_tx_unlock(txq);
197         }
198         return 0;
199 }
200
201 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
202                                              u16 idx)
203 {
204         u16 last_max = fp->last_max_sge;
205
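        /* signed 16-bit subtraction keeps the comparison correct even when
         * the SGE index wraps around
         */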
206         if (SUB_S16(idx, last_max) > 0)
207                 fp->last_max_sge = idx;
208 }
209
210 static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
211                                          u16 sge_len,
212                                          struct eth_end_agg_rx_cqe *cqe)
213 {
214         struct bnx2x *bp = fp->bp;
215         u16 last_max, last_elem, first_elem;
216         u16 delta = 0;
217         u16 i;
218
219         if (!sge_len)
220                 return;
221
222         /* First mark all used pages */
223         for (i = 0; i < sge_len; i++)
224                 BIT_VEC64_CLEAR_BIT(fp->sge_mask,
225                         RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));
226
227         DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
228            sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
229
230         /* Here we assume that the last SGE index is the biggest */
231         prefetch((void *)(fp->sge_mask));
232         bnx2x_update_last_max_sge(fp,
233                 le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
234
235         last_max = RX_SGE(fp->last_max_sge);
236         last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
237         first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
238
239         /* If ring is not full */
240         if (last_elem + 1 != first_elem)
241                 last_elem++;
242
243         /* Now update the prod */
244         for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
245                 if (likely(fp->sge_mask[i]))
246                         break;
247
248                 fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
249                 delta += BIT_VEC64_ELEM_SZ;
250         }
251
252         if (delta > 0) {
253                 fp->rx_sge_prod += delta;
254                 /* clear page-end entries */
255                 bnx2x_clear_sge_mask_next_elems(fp);
256         }
257
258         DP(NETIF_MSG_RX_STATUS,
259            "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
260            fp->last_max_sge, fp->rx_sge_prod);
261 }
262
263 /* Set Toeplitz hash value in the skb using the value from the
264  * CQE (calculated by HW).
265  */
266 static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
267                             const struct eth_fast_path_rx_cqe *cqe)
268 {
269         /* Set Toeplitz hash from CQE */
270         if ((bp->dev->features & NETIF_F_RXHASH) &&
271             (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
272                 return le32_to_cpu(cqe->rss_hash_result);
273         return 0;
274 }
275
276 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
277                             u16 cons, u16 prod,
278                             struct eth_fast_path_rx_cqe *cqe)
279 {
280         struct bnx2x *bp = fp->bp;
281         struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
282         struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
283         struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
284         dma_addr_t mapping;
285         struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
286         struct sw_rx_bd *first_buf = &tpa_info->first_buf;
287
288         /* print error if current state != stop */
289         if (tpa_info->tpa_state != BNX2X_TPA_STOP)
290                 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
291
292         /* Try to map an empty data buffer from the aggregation info  */
293         mapping = dma_map_single(&bp->pdev->dev,
294                                  first_buf->data + NET_SKB_PAD,
295                                  fp->rx_buf_size, DMA_FROM_DEVICE);
296         /*
297          *  ...if it fails - move the skb from the consumer to the producer
298          *  and set the current aggregation state as ERROR to drop it
299          *  when TPA_STOP arrives.
300          */
301
302         if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
303                 /* Move the BD from the consumer to the producer */
304                 bnx2x_reuse_rx_data(fp, cons, prod);
305                 tpa_info->tpa_state = BNX2X_TPA_ERROR;
306                 return;
307         }
308
309         /* move empty data from pool to prod */
310         prod_rx_buf->data = first_buf->data;
311         dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
312         /* point prod_bd to new data */
313         prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
314         prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
315
316         /* move partial skb from cons to pool (don't unmap yet) */
317         *first_buf = *cons_rx_buf;
318
319         /* mark bin state as START */
320         tpa_info->parsing_flags =
321                 le16_to_cpu(cqe->pars_flags.flags);
322         tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
323         tpa_info->tpa_state = BNX2X_TPA_START;
324         tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
325         tpa_info->placement_offset = cqe->placement_offset;
326         tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe);
327         if (fp->mode == TPA_MODE_GRO) {
328                 u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
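                /* full_page is the largest multiple of gro_size that fits
                 * into one SGE buffer (SGE_PAGE_SIZE * PAGES_PER_SGE)
                 */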
329                 tpa_info->full_page =
330                         SGE_PAGE_SIZE * PAGES_PER_SGE / gro_size * gro_size;
331                 tpa_info->gro_size = gro_size;
332         }
333
334 #ifdef BNX2X_STOP_ON_ERROR
335         fp->tpa_queue_used |= (1 << queue);
336 #ifdef _ASM_GENERIC_INT_L64_H
337         DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
338 #else
339         DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
340 #endif
341            fp->tpa_queue_used);
342 #endif
343 }
344
345 /* Timestamp option length allowed for TPA aggregation:
346  *
347  *              nop nop kind length echo val
348  */
349 #define TPA_TSTAMP_OPT_LEN      12
350 /**
351  * bnx2x_set_lro_mss - calculate the approximate value of the MSS
352  *
353  * @bp:                 driver handle
354  * @parsing_flags:      parsing flags from the START CQE
355  * @len_on_bd:          total length of the first packet for the
356  *                      aggregation.
357  *
358  * Returns the approximate value of the MSS for this aggregation,
359  * calculated using its first packet.
360  */
361 static u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags,
362                              u16 len_on_bd)
363 {
364         /*
365          * A TPA aggregation won't have IP options, TCP options other
366          * than timestamp, or IPv6 extension headers.
367          */
368         u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);
369
370         if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
371             PRS_FLAG_OVERETH_IPV6)
372                 hdrs_len += sizeof(struct ipv6hdr);
373         else /* IPv4 */
374                 hdrs_len += sizeof(struct iphdr);
375
376
377         /* Check if there was a TCP timestamp; if there was, it will
378          * always be 12 bytes long: nop nop kind length echo val.
379          *
380          * Otherwise the FW would have closed the aggregation.
381          */
382         if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
383                 hdrs_len += TPA_TSTAMP_OPT_LEN;
384
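        /* e.g. for a typical IPv4 aggregation with TCP timestamps and no
         * other options: hdrs_len = 14 (ETH) + 20 (IP) + 20 (TCP) +
         * 12 (timestamp) = 66, so the MSS is roughly len_on_bd - 66
         */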
385         return len_on_bd - hdrs_len;
386 }
387
388 static int bnx2x_alloc_rx_sge(struct bnx2x *bp,
389                               struct bnx2x_fastpath *fp, u16 index)
390 {
391         struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
392         struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
393         struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
394         dma_addr_t mapping;
395
396         if (unlikely(page == NULL)) {
397                 BNX2X_ERR("Can't alloc sge\n");
398                 return -ENOMEM;
399         }
400
401         mapping = dma_map_page(&bp->pdev->dev, page, 0,
402                                SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
403         if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
404                 __free_pages(page, PAGES_PER_SGE_SHIFT);
405                 BNX2X_ERR("Can't map sge\n");
406                 return -ENOMEM;
407         }
408
409         sw_buf->page = page;
410         dma_unmap_addr_set(sw_buf, mapping, mapping);
411
412         sge->addr_hi = cpu_to_le32(U64_HI(mapping));
413         sge->addr_lo = cpu_to_le32(U64_LO(mapping));
414
415         return 0;
416 }
417
418 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
419                                struct bnx2x_agg_info *tpa_info,
420                                u16 pages,
421                                struct sk_buff *skb,
422                                struct eth_end_agg_rx_cqe *cqe,
423                                u16 cqe_idx)
424 {
425         struct sw_rx_page *rx_pg, old_rx_pg;
426         u32 i, frag_len, frag_size;
427         int err, j, frag_id = 0;
428         u16 len_on_bd = tpa_info->len_on_bd;
429         u16 full_page = 0, gro_size = 0;
430
431         frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;
432
433         if (fp->mode == TPA_MODE_GRO) {
434                 gro_size = tpa_info->gro_size;
435                 full_page = tpa_info->full_page;
436         }
437
438         /* This is needed in order to enable forwarding support */
439         if (frag_size) {
440                 skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp,
441                                         tpa_info->parsing_flags, len_on_bd);
442
443                 /* set for GRO */
444                 if (fp->mode == TPA_MODE_GRO)
445                         skb_shinfo(skb)->gso_type =
446                             (GET_FLAG(tpa_info->parsing_flags,
447                                       PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
448                                                 PRS_FLAG_OVERETH_IPV6) ?
449                                 SKB_GSO_TCPV6 : SKB_GSO_TCPV4;
450         }
451
452
453 #ifdef BNX2X_STOP_ON_ERROR
454         if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
455                 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
456                           pages, cqe_idx);
457                 BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
458                 bnx2x_panic();
459                 return -EINVAL;
460         }
461 #endif
462
463         /* Run through the SGL and compose the fragmented skb */
464         for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
465                 u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));
466
467                 /* The FW gives the indices of the SGE as if the ring were an array
468                    (meaning that "next" element will consume 2 indices) */
469                 if (fp->mode == TPA_MODE_GRO)
470                         frag_len = min_t(u32, frag_size, (u32)full_page);
471                 else /* LRO */
472                         frag_len = min_t(u32, frag_size,
473                                          (u32)(SGE_PAGE_SIZE * PAGES_PER_SGE));
474
475                 rx_pg = &fp->rx_page_ring[sge_idx];
476                 old_rx_pg = *rx_pg;
477
478                 /* If we fail to allocate a substitute page, we simply stop
479                    where we are and drop the whole packet */
480                 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
481                 if (unlikely(err)) {
482                         fp->eth_q_stats.rx_skb_alloc_failed++;
483                         return err;
484                 }
485
486                 /* Unmap the page as we are going to pass it to the stack */
487                 dma_unmap_page(&bp->pdev->dev,
488                                dma_unmap_addr(&old_rx_pg, mapping),
489                                SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
490                 /* Add one frag and update the appropriate fields in the skb */
491                 if (fp->mode == TPA_MODE_LRO)
492                         skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
493                 else { /* GRO */
494                         int rem;
495                         int offset = 0;
496                         for (rem = frag_len; rem > 0; rem -= gro_size) {
497                                 int len = rem > gro_size ? gro_size : rem;
498                                 skb_fill_page_desc(skb, frag_id++,
499                                                    old_rx_pg.page, offset, len);
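                                /* frags after the first one share this page,
                                 * so take an extra page reference for each of
                                 * them
                                 */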
500                                 if (offset)
501                                         get_page(old_rx_pg.page);
502                                 offset += len;
503                         }
504                 }
505
506                 skb->data_len += frag_len;
507                 skb->truesize += SGE_PAGE_SIZE * PAGES_PER_SGE;
508                 skb->len += frag_len;
509
510                 frag_size -= frag_len;
511         }
512
513         return 0;
514 }
515
516 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
517                            struct bnx2x_agg_info *tpa_info,
518                            u16 pages,
519                            struct eth_end_agg_rx_cqe *cqe,
520                            u16 cqe_idx)
521 {
522         struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
523         u8 pad = tpa_info->placement_offset;
524         u16 len = tpa_info->len_on_bd;
525         struct sk_buff *skb = NULL;
526         u8 *new_data, *data = rx_buf->data;
527         u8 old_tpa_state = tpa_info->tpa_state;
528
529         tpa_info->tpa_state = BNX2X_TPA_STOP;
530
531         /* If there was an error during the handling of the TPA_START,
532          * drop this aggregation.
533          */
534         if (old_tpa_state == BNX2X_TPA_ERROR)
535                 goto drop;
536
537         /* Try to allocate the new data */
538         new_data = kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);
539
540         /* Unmap skb in the pool anyway, as we are going to change
541            pool entry status to BNX2X_TPA_STOP even if new skb allocation
542            fails. */
543         dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
544                          fp->rx_buf_size, DMA_FROM_DEVICE);
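        /* Build an skb around the old buffer only if a replacement buffer
         * was allocated; otherwise the old buffer stays in the bin and the
         * aggregation is dropped below.
         */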
545         if (likely(new_data))
546                 skb = build_skb(data, 0);
547
548         if (likely(skb)) {
549 #ifdef BNX2X_STOP_ON_ERROR
550                 if (pad + len > fp->rx_buf_size) {
551                         BNX2X_ERR("skb_put is about to fail...  pad %d  len %d  rx_buf_size %d\n",
552                                   pad, len, fp->rx_buf_size);
553                         bnx2x_panic();
554                         return;
555                 }
556 #endif
557
558                 skb_reserve(skb, pad + NET_SKB_PAD);
559                 skb_put(skb, len);
560                 skb->rxhash = tpa_info->rxhash;
561
562                 skb->protocol = eth_type_trans(skb, bp->dev);
563                 skb->ip_summed = CHECKSUM_UNNECESSARY;
564
565                 if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
566                                          skb, cqe, cqe_idx)) {
567                         if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
568                                 __vlan_hwaccel_put_tag(skb, tpa_info->vlan_tag);
569                         napi_gro_receive(&fp->napi, skb);
570                 } else {
571                         DP(NETIF_MSG_RX_STATUS,
572                            "Failed to allocate new pages - dropping packet!\n");
573                         dev_kfree_skb_any(skb);
574                 }
575
576
577                 /* put new data in bin */
578                 rx_buf->data = new_data;
579
580                 return;
581         }
582         kfree(new_data);
583 drop:
584         /* drop the packet and keep the buffer in the bin */
585         DP(NETIF_MSG_RX_STATUS,
586            "Failed to allocate or map a new skb - dropping packet!\n");
587         fp->eth_q_stats.rx_skb_alloc_failed++;
588 }
589
590 static int bnx2x_alloc_rx_data(struct bnx2x *bp,
591                                struct bnx2x_fastpath *fp, u16 index)
592 {
593         u8 *data;
594         struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
595         struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
596         dma_addr_t mapping;
597
598         data = kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);
599         if (unlikely(data == NULL))
600                 return -ENOMEM;
601
602         mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
603                                  fp->rx_buf_size,
604                                  DMA_FROM_DEVICE);
605         if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
606                 kfree(data);
607                 BNX2X_ERR("Can't map rx data\n");
608                 return -ENOMEM;
609         }
610
611         rx_buf->data = data;
612         dma_unmap_addr_set(rx_buf, mapping, mapping);
613
614         rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
615         rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
616
617         return 0;
618 }
619
620 static void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
621                                 struct bnx2x_fastpath *fp)
622 {
623         /* Do nothing if no IP/L4 csum validation was done */
624
625         if (cqe->fast_path_cqe.status_flags &
626             (ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG |
627              ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG))
628                 return;
629
630         /* If both IP/L4 validation were done, check if an error was found. */
631
632         if (cqe->fast_path_cqe.type_error_flags &
633             (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
634              ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
635                 fp->eth_q_stats.hw_csum_err++;
636         else
637                 skb->ip_summed = CHECKSUM_UNNECESSARY;
638 }
639
640 int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
641 {
642         struct bnx2x *bp = fp->bp;
643         u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
644         u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
645         int rx_pkt = 0;
646
647 #ifdef BNX2X_STOP_ON_ERROR
648         if (unlikely(bp->panic))
649                 return 0;
650 #endif
651
652         /* The CQ "next element" is the same size as a regular element,
653            that's why it's ok here */
654         hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
655         if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
656                 hw_comp_cons++;
657
658         bd_cons = fp->rx_bd_cons;
659         bd_prod = fp->rx_bd_prod;
660         bd_prod_fw = bd_prod;
661         sw_comp_cons = fp->rx_comp_cons;
662         sw_comp_prod = fp->rx_comp_prod;
663
664         /* Memory barrier necessary as speculative reads of the rx
665          * buffer can be ahead of the index in the status block
666          */
667         rmb();
668
669         DP(NETIF_MSG_RX_STATUS,
670            "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
671            fp->index, hw_comp_cons, sw_comp_cons);
672
673         while (sw_comp_cons != hw_comp_cons) {
674                 struct sw_rx_bd *rx_buf = NULL;
675                 struct sk_buff *skb;
676                 union eth_rx_cqe *cqe;
677                 struct eth_fast_path_rx_cqe *cqe_fp;
678                 u8 cqe_fp_flags;
679                 enum eth_rx_cqe_type cqe_fp_type;
680                 u16 len, pad, queue;
681                 u8 *data;
682
683 #ifdef BNX2X_STOP_ON_ERROR
684                 if (unlikely(bp->panic))
685                         return 0;
686 #endif
687
688                 comp_ring_cons = RCQ_BD(sw_comp_cons);
689                 bd_prod = RX_BD(bd_prod);
690                 bd_cons = RX_BD(bd_cons);
691
692                 cqe = &fp->rx_comp_ring[comp_ring_cons];
693                 cqe_fp = &cqe->fast_path_cqe;
694                 cqe_fp_flags = cqe_fp->type_error_flags;
695                 cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
696
697                 DP(NETIF_MSG_RX_STATUS,
698                    "CQE type %x  err %x  status %x  queue %x  vlan %x  len %u\n",
699                    CQE_TYPE(cqe_fp_flags),
700                    cqe_fp_flags, cqe_fp->status_flags,
701                    le32_to_cpu(cqe_fp->rss_hash_result),
702                    le16_to_cpu(cqe_fp->vlan_tag),
703                    le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));
704
705                 /* is this a slowpath msg? */
706                 if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
707                         bnx2x_sp_event(fp, cqe);
708                         goto next_cqe;
709                 }
710
711                 rx_buf = &fp->rx_buf_ring[bd_cons];
712                 data = rx_buf->data;
713
714                 if (!CQE_TYPE_FAST(cqe_fp_type)) {
715                         struct bnx2x_agg_info *tpa_info;
716                         u16 frag_size, pages;
717 #ifdef BNX2X_STOP_ON_ERROR
718                         /* sanity check */
719                         if (fp->disable_tpa &&
720                             (CQE_TYPE_START(cqe_fp_type) ||
721                              CQE_TYPE_STOP(cqe_fp_type)))
722                                 BNX2X_ERR("START/STOP packet while disable_tpa type %x\n",
723                                           CQE_TYPE(cqe_fp_type));
724 #endif
725
726                         if (CQE_TYPE_START(cqe_fp_type)) {
727                                 u16 queue = cqe_fp->queue_index;
728                                 DP(NETIF_MSG_RX_STATUS,
729                                    "calling tpa_start on queue %d\n",
730                                    queue);
731
732                                 bnx2x_tpa_start(fp, queue,
733                                                 bd_cons, bd_prod,
734                                                 cqe_fp);
735
736                                 goto next_rx;
737
738                         }
739                         queue = cqe->end_agg_cqe.queue_index;
740                         tpa_info = &fp->tpa_info[queue];
741                         DP(NETIF_MSG_RX_STATUS,
742                            "calling tpa_stop on queue %d\n",
743                            queue);
744
745                         frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
746                                     tpa_info->len_on_bd;
747
748                         if (fp->mode == TPA_MODE_GRO)
749                                 pages = (frag_size + tpa_info->full_page - 1) /
750                                          tpa_info->full_page;
751                         else
752                                 pages = SGE_PAGE_ALIGN(frag_size) >>
753                                         SGE_PAGE_SHIFT;
754
755                         bnx2x_tpa_stop(bp, fp, tpa_info, pages,
756                                        &cqe->end_agg_cqe, comp_ring_cons);
757 #ifdef BNX2X_STOP_ON_ERROR
758                         if (bp->panic)
759                                 return 0;
760 #endif
761
762                         bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
763                         goto next_cqe;
764                 }
765                 /* non TPA */
766                 len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
767                 pad = cqe_fp->placement_offset;
768                 dma_sync_single_for_cpu(&bp->pdev->dev,
769                                         dma_unmap_addr(rx_buf, mapping),
770                                         pad + RX_COPY_THRESH,
771                                         DMA_FROM_DEVICE);
772                 pad += NET_SKB_PAD;
773                 prefetch(data + pad); /* speedup eth_type_trans() */
774                 /* is this an error packet? */
775                 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
776                         DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
777                            "ERROR  flags %x  rx packet %u\n",
778                            cqe_fp_flags, sw_comp_cons);
779                         fp->eth_q_stats.rx_err_discard_pkt++;
780                         goto reuse_rx;
781                 }
782
783                 /* Since we don't have a jumbo ring
784                  * copy small packets if mtu > 1500
785                  */
786                 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
787                     (len <= RX_COPY_THRESH)) {
788                         skb = netdev_alloc_skb_ip_align(bp->dev, len);
789                         if (skb == NULL) {
790                                 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
791                                    "ERROR  packet dropped because of alloc failure\n");
792                                 fp->eth_q_stats.rx_skb_alloc_failed++;
793                                 goto reuse_rx;
794                         }
795                         memcpy(skb->data, data + pad, len);
796                         bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
797                 } else {
798                         if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod) == 0)) {
799                                 dma_unmap_single(&bp->pdev->dev,
800                                                  dma_unmap_addr(rx_buf, mapping),
801                                                  fp->rx_buf_size,
802                                                  DMA_FROM_DEVICE);
803                                 skb = build_skb(data, 0);
804                                 if (unlikely(!skb)) {
805                                         kfree(data);
806                                         fp->eth_q_stats.rx_skb_alloc_failed++;
807                                         goto next_rx;
808                                 }
809                                 skb_reserve(skb, pad);
810                         } else {
811                                 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
812                                    "ERROR  packet dropped because of alloc failure\n");
813                                 fp->eth_q_stats.rx_skb_alloc_failed++;
814 reuse_rx:
815                                 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
816                                 goto next_rx;
817                         }
818                 }
819
820                 skb_put(skb, len);
821                 skb->protocol = eth_type_trans(skb, bp->dev);
822
823                 /* Set Toeplitz hash for a non-LRO skb */
824                 skb->rxhash = bnx2x_get_rxhash(bp, cqe_fp);
825
826                 skb_checksum_none_assert(skb);
827
828                 if (bp->dev->features & NETIF_F_RXCSUM)
829                         bnx2x_csum_validate(skb, cqe, fp);
830
831
832                 skb_record_rx_queue(skb, fp->rx_queue);
833
834                 if (le16_to_cpu(cqe_fp->pars_flags.flags) &
835                     PARSING_FLAGS_VLAN)
836                         __vlan_hwaccel_put_tag(skb,
837                                                le16_to_cpu(cqe_fp->vlan_tag));
838                 napi_gro_receive(&fp->napi, skb);
839
840
841 next_rx:
842                 rx_buf->data = NULL;
843
844                 bd_cons = NEXT_RX_IDX(bd_cons);
845                 bd_prod = NEXT_RX_IDX(bd_prod);
846                 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
847                 rx_pkt++;
848 next_cqe:
849                 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
850                 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
851
852                 if (rx_pkt == budget)
853                         break;
854         } /* while */
855
856         fp->rx_bd_cons = bd_cons;
857         fp->rx_bd_prod = bd_prod_fw;
858         fp->rx_comp_cons = sw_comp_cons;
859         fp->rx_comp_prod = sw_comp_prod;
860
861         /* Update producers */
862         bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
863                              fp->rx_sge_prod);
864
865         fp->rx_pkt += rx_pkt;
866         fp->rx_calls++;
867
868         return rx_pkt;
869 }
870
871 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
872 {
873         struct bnx2x_fastpath *fp = fp_cookie;
874         struct bnx2x *bp = fp->bp;
875         u8 cos;
876
877         DP(NETIF_MSG_INTR,
878            "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
879            fp->index, fp->fw_sb_id, fp->igu_sb_id);
880         bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
881
882 #ifdef BNX2X_STOP_ON_ERROR
883         if (unlikely(bp->panic))
884                 return IRQ_HANDLED;
885 #endif
886
887         /* Handle Rx and Tx according to MSI-X vector */
888         prefetch(fp->rx_cons_sb);
889
890         for_each_cos_in_tx_queue(fp, cos)
891                 prefetch(fp->txdata[cos].tx_cons_sb);
892
893         prefetch(&fp->sb_running_index[SM_RX_ID]);
894         napi_schedule(&bnx2x_fp(bp, fp->index, napi));
895
896         return IRQ_HANDLED;
897 }
898
899 /* HW Lock for shared dual port PHYs */
900 void bnx2x_acquire_phy_lock(struct bnx2x *bp)
901 {
902         mutex_lock(&bp->port.phy_mutex);
903
904         if (bp->port.need_hw_lock)
905                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
906 }
907
908 void bnx2x_release_phy_lock(struct bnx2x *bp)
909 {
910         if (bp->port.need_hw_lock)
911                 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
912
913         mutex_unlock(&bp->port.phy_mutex);
914 }
915
916 /* calculates MF speed according to current line speed and MF configuration */
917 u16 bnx2x_get_mf_speed(struct bnx2x *bp)
918 {
919         u16 line_speed = bp->link_vars.line_speed;
920         if (IS_MF(bp)) {
921                 u16 maxCfg = bnx2x_extract_max_cfg(bp,
922                                                    bp->mf_config[BP_VN(bp)]);
923
924                 /* Calculate the current MAX line speed limit for the MF
925                  * devices
926                  */
927                 if (IS_MF_SI(bp))
928                         line_speed = (line_speed * maxCfg) / 100;
929                 else { /* SD mode */
930                         u16 vn_max_rate = maxCfg * 100;
931
932                         if (vn_max_rate < line_speed)
933                                 line_speed = vn_max_rate;
934                 }
935         }
936
937         return line_speed;
938 }
939
940 /**
941  * bnx2x_fill_report_data - fill link report data to report
942  *
943  * @bp:         driver handle
944  * @data:       link state to update
945  *
946  * It uses non-atomic bit operations because it is called under the mutex.
947  */
948 static void bnx2x_fill_report_data(struct bnx2x *bp,
949                                    struct bnx2x_link_report_data *data)
950 {
951         u16 line_speed = bnx2x_get_mf_speed(bp);
952
953         memset(data, 0, sizeof(*data));
954
955         /* Fill the report data: effective line speed */
956         data->line_speed = line_speed;
957
958         /* Link is down */
959         if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
960                 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
961                           &data->link_report_flags);
962
963         /* Full DUPLEX */
964         if (bp->link_vars.duplex == DUPLEX_FULL)
965                 __set_bit(BNX2X_LINK_REPORT_FD, &data->link_report_flags);
966
967         /* Rx Flow Control is ON */
968         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
969                 __set_bit(BNX2X_LINK_REPORT_RX_FC_ON, &data->link_report_flags);
970
971         /* Tx Flow Control is ON */
972         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
973                 __set_bit(BNX2X_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
974 }
975
976 /**
977  * bnx2x_link_report - report link status to OS.
978  *
979  * @bp:         driver handle
980  *
981  * Calls the __bnx2x_link_report() under the same locking scheme
982  * as the link/PHY state managing code to ensure consistent link
983  * reporting.
984  */
985
986 void bnx2x_link_report(struct bnx2x *bp)
987 {
988         bnx2x_acquire_phy_lock(bp);
989         __bnx2x_link_report(bp);
990         bnx2x_release_phy_lock(bp);
991 }
992
993 /**
994  * __bnx2x_link_report - report link status to OS.
995  *
996  * @bp:         driver handle
997  *
998  * Non-atomic implementation.
999  * Should be called under the phy_lock.
1000  */
1001 void __bnx2x_link_report(struct bnx2x *bp)
1002 {
1003         struct bnx2x_link_report_data cur_data;
1004
1005         /* reread mf_cfg */
1006         if (!CHIP_IS_E1(bp))
1007                 bnx2x_read_mf_cfg(bp);
1008
1009         /* Read the current link report info */
1010         bnx2x_fill_report_data(bp, &cur_data);
1011
1012         /* Don't report link down or exactly the same link status twice */
1013         if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
1014             (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1015                       &bp->last_reported_link.link_report_flags) &&
1016              test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1017                       &cur_data.link_report_flags)))
1018                 return;
1019
1020         bp->link_cnt++;
1021
1022         /* We are going to report new link parameters now -
1023          * remember the current data for next time.
1024          */
1025         memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));
1026
1027         if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1028                      &cur_data.link_report_flags)) {
1029                 netif_carrier_off(bp->dev);
1030                 netdev_err(bp->dev, "NIC Link is Down\n");
1031                 return;
1032         } else {
1033                 const char *duplex;
1034                 const char *flow;
1035
1036                 netif_carrier_on(bp->dev);
1037
1038                 if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
1039                                        &cur_data.link_report_flags))
1040                         duplex = "full";
1041                 else
1042                         duplex = "half";
1043
1044                 /* Handle the FC at the end so that only these flags could
1045                  * possibly be set. This way we can easily check whether any
1046                  * FC is enabled.
1047                  */
1048                 if (cur_data.link_report_flags) {
1049                         if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
1050                                      &cur_data.link_report_flags)) {
1051                                 if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
1052                                      &cur_data.link_report_flags))
1053                                         flow = "ON - receive & transmit";
1054                                 else
1055                                         flow = "ON - receive";
1056                         } else {
1057                                 flow = "ON - transmit";
1058                         }
1059                 } else {
1060                         flow = "none";
1061                 }
1062                 netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
1063                             cur_data.line_speed, duplex, flow);
1064         }
1065 }
1066
1067 static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
1068 {
1069         int i;
1070
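        /* Set up the "next page" entry at the end of each SGE page so the
         * pages form a ring (the last page points back to the first one).
         */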
1071         for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1072                 struct eth_rx_sge *sge;
1073
1074                 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
1075                 sge->addr_hi =
1076                         cpu_to_le32(U64_HI(fp->rx_sge_mapping +
1077                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1078
1079                 sge->addr_lo =
1080                         cpu_to_le32(U64_LO(fp->rx_sge_mapping +
1081                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1082         }
1083 }
1084
1085 static void bnx2x_free_tpa_pool(struct bnx2x *bp,
1086                                 struct bnx2x_fastpath *fp, int last)
1087 {
1088         int i;
1089
1090         for (i = 0; i < last; i++) {
1091                 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
1092                 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
1093                 u8 *data = first_buf->data;
1094
1095                 if (data == NULL) {
1096                         DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
1097                         continue;
1098                 }
1099                 if (tpa_info->tpa_state == BNX2X_TPA_START)
1100                         dma_unmap_single(&bp->pdev->dev,
1101                                          dma_unmap_addr(first_buf, mapping),
1102                                          fp->rx_buf_size, DMA_FROM_DEVICE);
1103                 kfree(data);
1104                 first_buf->data = NULL;
1105         }
1106 }
1107
1108 void bnx2x_init_rx_rings(struct bnx2x *bp)
1109 {
1110         int func = BP_FUNC(bp);
1111         u16 ring_prod;
1112         int i, j;
1113
1114         /* Allocate TPA resources */
1115         for_each_rx_queue(bp, j) {
1116                 struct bnx2x_fastpath *fp = &bp->fp[j];
1117
1118                 DP(NETIF_MSG_IFUP,
1119                    "mtu %d  rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
1120
1121                 if (!fp->disable_tpa) {
1122                         /* Fill the per-aggregation pool */
1123                         for (i = 0; i < MAX_AGG_QS(bp); i++) {
1124                                 struct bnx2x_agg_info *tpa_info =
1125                                         &fp->tpa_info[i];
1126                                 struct sw_rx_bd *first_buf =
1127                                         &tpa_info->first_buf;
1128
1129                                 first_buf->data = kmalloc(fp->rx_buf_size + NET_SKB_PAD,
1130                                                           GFP_ATOMIC);
1131                                 if (!first_buf->data) {
1132                                         BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
1133                                                   j);
1134                                         bnx2x_free_tpa_pool(bp, fp, i);
1135                                         fp->disable_tpa = 1;
1136                                         break;
1137                                 }
1138                                 dma_unmap_addr_set(first_buf, mapping, 0);
1139                                 tpa_info->tpa_state = BNX2X_TPA_STOP;
1140                         }
1141
1142                         /* "next page" elements initialization */
1143                         bnx2x_set_next_page_sgl(fp);
1144
1145                         /* set SGEs bit mask */
1146                         bnx2x_init_sge_ring_bit_mask(fp);
1147
1148                         /* Allocate SGEs and initialize the ring elements */
1149                         for (i = 0, ring_prod = 0;
1150                              i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
1151
1152                                 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
1153                                         BNX2X_ERR("was only able to allocate %d rx sges\n",
1154                                                   i);
1155                                         BNX2X_ERR("disabling TPA for queue[%d]\n",
1156                                                   j);
1157                                         /* Cleanup already allocated elements */
1158                                         bnx2x_free_rx_sge_range(bp, fp,
1159                                                                 ring_prod);
1160                                         bnx2x_free_tpa_pool(bp, fp,
1161                                                             MAX_AGG_QS(bp));
1162                                         fp->disable_tpa = 1;
1163                                         ring_prod = 0;
1164                                         break;
1165                                 }
1166                                 ring_prod = NEXT_SGE_IDX(ring_prod);
1167                         }
1168
1169                         fp->rx_sge_prod = ring_prod;
1170                 }
1171         }
1172
1173         for_each_rx_queue(bp, j) {
1174                 struct bnx2x_fastpath *fp = &bp->fp[j];
1175
1176                 fp->rx_bd_cons = 0;
1177
1178                 /* Activate BD ring */
1179                 /* Warning!
1180                  * This will generate an interrupt (to the TSTORM);
1181                  * it must only be done after the chip is initialized.
1182                  */
1183                 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1184                                      fp->rx_sge_prod);
1185
1186                 if (j != 0)
1187                         continue;
1188
1189                 if (CHIP_IS_E1(bp)) {
1190                         REG_WR(bp, BAR_USTRORM_INTMEM +
1191                                USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
1192                                U64_LO(fp->rx_comp_mapping));
1193                         REG_WR(bp, BAR_USTRORM_INTMEM +
1194                                USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
1195                                U64_HI(fp->rx_comp_mapping));
1196                 }
1197         }
1198 }
1199
1200 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
1201 {
1202         int i;
1203         u8 cos;
1204
1205         for_each_tx_queue(bp, i) {
1206                 struct bnx2x_fastpath *fp = &bp->fp[i];
1207                 for_each_cos_in_tx_queue(fp, cos) {
1208                         struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
1209                         unsigned pkts_compl = 0, bytes_compl = 0;
1210
1211                         u16 sw_prod = txdata->tx_pkt_prod;
1212                         u16 sw_cons = txdata->tx_pkt_cons;
1213
1214                         while (sw_cons != sw_prod) {
1215                                 bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
1216                                     &pkts_compl, &bytes_compl);
1217                                 sw_cons++;
1218                         }
1219                         netdev_tx_reset_queue(
1220                             netdev_get_tx_queue(bp->dev, txdata->txq_index));
1221                 }
1222         }
1223 }
1224
1225 static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
1226 {
1227         struct bnx2x *bp = fp->bp;
1228         int i;
1229
1230         /* ring wasn't allocated */
1231         if (fp->rx_buf_ring == NULL)
1232                 return;
1233
1234         for (i = 0; i < NUM_RX_BD; i++) {
1235                 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
1236                 u8 *data = rx_buf->data;
1237
1238                 if (data == NULL)
1239                         continue;
1240                 dma_unmap_single(&bp->pdev->dev,
1241                                  dma_unmap_addr(rx_buf, mapping),
1242                                  fp->rx_buf_size, DMA_FROM_DEVICE);
1243
1244                 rx_buf->data = NULL;
1245                 kfree(data);
1246         }
1247 }
1248
1249 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1250 {
1251         int j;
1252
1253         for_each_rx_queue(bp, j) {
1254                 struct bnx2x_fastpath *fp = &bp->fp[j];
1255
1256                 bnx2x_free_rx_bds(fp);
1257
1258                 if (!fp->disable_tpa)
1259                         bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
1260         }
1261 }
1262
1263 void bnx2x_free_skbs(struct bnx2x *bp)
1264 {
1265         bnx2x_free_tx_skbs(bp);
1266         bnx2x_free_rx_skbs(bp);
1267 }
1268
1269 void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
1270 {
1271         /* load old values */
1272         u32 mf_cfg = bp->mf_config[BP_VN(bp)];
1273
1274         if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
1275                 /* leave all but MAX value */
1276                 mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
1277
1278                 /* set new MAX value */
1279                 mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
1280                                 & FUNC_MF_CFG_MAX_BW_MASK;
1281
1282                 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
1283         }
1284 }
1285
1286 /**
1287  * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
1288  *
1289  * @bp:         driver handle
1290  * @nvecs:      number of vectors to be released
1291  */
1292 static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
1293 {
1294         int i, offset = 0;
1295
1296         if (nvecs == offset)
1297                 return;
1298         free_irq(bp->msix_table[offset].vector, bp->dev);
1299         DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
1300            bp->msix_table[offset].vector);
1301         offset++;
1302 #ifdef BCM_CNIC
1303         if (nvecs == offset)
1304                 return;
1305         offset++;
1306 #endif
1307
1308         for_each_eth_queue(bp, i) {
1309                 if (nvecs == offset)
1310                         return;
1311                 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq\n",
1312                    i, bp->msix_table[offset].vector);
1313
1314                 free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
1315         }
1316 }
1317
1318 void bnx2x_free_irq(struct bnx2x *bp)
1319 {
1320         if (bp->flags & USING_MSIX_FLAG &&
1321             !(bp->flags & USING_SINGLE_MSIX_FLAG))
1322                 bnx2x_free_msix_irqs(bp, BNX2X_NUM_ETH_QUEUES(bp) +
1323                                      CNIC_PRESENT + 1);
1324         else
1325                 free_irq(bp->dev->irq, bp->dev);
1326 }
1327
1328 int __devinit bnx2x_enable_msix(struct bnx2x *bp)
1329 {
1330         int msix_vec = 0, i, rc, req_cnt;
1331
1332         bp->msix_table[msix_vec].entry = msix_vec;
1333         BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n",
1334            bp->msix_table[0].entry);
1335         msix_vec++;
1336
1337 #ifdef BCM_CNIC
1338         bp->msix_table[msix_vec].entry = msix_vec;
1339         BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
1340            bp->msix_table[msix_vec].entry, bp->msix_table[msix_vec].entry);
1341         msix_vec++;
1342 #endif
1343         /* We need separate vectors for ETH queues only (not FCoE) */
1344         for_each_eth_queue(bp, i) {
1345                 bp->msix_table[msix_vec].entry = msix_vec;
1346                 BNX2X_DEV_INFO("msix_table[%d].entry = %d (fastpath #%u)\n",
1347                                msix_vec, msix_vec, i);
1348                 msix_vec++;
1349         }
1350
1351         req_cnt = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_PRESENT + 1;
1352
1353         rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], req_cnt);
1354
1355         /*
1356          * reconfigure number of tx/rx queues according to available
1357          * MSI-X vectors
1358          */
1359         if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
1360                 /* how many fewer vectors will we have? */
1361                 int diff = req_cnt - rc;
1362
1363                 BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);
1364
1365                 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
1366
1367                 if (rc) {
1368                         BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
1369                         goto no_msix;
1370                 }
1371                 /*
1372                  * decrease number of queues by number of unallocated entries
1373                  */
1374                 bp->num_queues -= diff;
1375
1376                 BNX2X_DEV_INFO("New queue configuration set: %d\n",
1377                                bp->num_queues);
1378         } else if (rc > 0) {
1379                 /* Get by with single vector */
1380                 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], 1);
1381                 if (rc) {
1382                         BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
1383                                        rc);
1384                         goto no_msix;
1385                 }
1386
1387                 BNX2X_DEV_INFO("Using single MSI-X vector\n");
1388                 bp->flags |= USING_SINGLE_MSIX_FLAG;
1389
1390         } else if (rc < 0) {
1391                 BNX2X_DEV_INFO("MSI-X is not attainable  rc %d\n", rc);
1392                 goto no_msix;
1393         }
1394
1395         bp->flags |= USING_MSIX_FLAG;
1396
1397         return 0;
1398
1399 no_msix:
1400         /* fall back to INTx if there is not enough memory */
1401         if (rc == -ENOMEM)
1402                 bp->flags |= DISABLE_MSI_FLAG;
1403
1404         return rc;
1405 }
1406
1407 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1408 {
1409         int i, rc, offset = 0;
1410
1411         rc = request_irq(bp->msix_table[offset++].vector,
1412                          bnx2x_msix_sp_int, 0,
1413                          bp->dev->name, bp->dev);
1414         if (rc) {
1415                 BNX2X_ERR("request sp irq failed\n");
1416                 return -EBUSY;
1417         }
1418
1419 #ifdef BCM_CNIC
1420         offset++;
1421 #endif
1422         for_each_eth_queue(bp, i) {
1423                 struct bnx2x_fastpath *fp = &bp->fp[i];
1424                 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1425                          bp->dev->name, i);
1426
1427                 rc = request_irq(bp->msix_table[offset].vector,
1428                                  bnx2x_msix_fp_int, 0, fp->name, fp);
1429                 if (rc) {
1430                         BNX2X_ERR("request fp #%d irq (%d) failed  rc %d\n", i,
1431                               bp->msix_table[offset].vector, rc);
1432                         bnx2x_free_msix_irqs(bp, offset);
1433                         return -EBUSY;
1434                 }
1435
1436                 offset++;
1437         }
1438
1439         i = BNX2X_NUM_ETH_QUEUES(bp);
1440         offset = 1 + CNIC_PRESENT;
1441         netdev_info(bp->dev, "using MSI-X  IRQs: sp %d  fp[%d] %d ... fp[%d] %d\n",
1442                bp->msix_table[0].vector,
1443                0, bp->msix_table[offset].vector,
1444                i - 1, bp->msix_table[offset + i - 1].vector);
1445
1446         return 0;
1447 }
1448
1449 int bnx2x_enable_msi(struct bnx2x *bp)
1450 {
1451         int rc;
1452
1453         rc = pci_enable_msi(bp->pdev);
1454         if (rc) {
1455                 BNX2X_DEV_INFO("MSI is not attainable\n");
1456                 return -1;
1457         }
1458         bp->flags |= USING_MSI_FLAG;
1459
1460         return 0;
1461 }
1462
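/* Request a single interrupt line: the first MSI-X vector, the MSI
 * vector or the legacy (shared) INTx line, depending on the flags set
 * when the interrupt mode was chosen.
 */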
1463 static int bnx2x_req_irq(struct bnx2x *bp)
1464 {
1465         unsigned long flags;
1466         unsigned int irq;
1467
1468         if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG))
1469                 flags = 0;
1470         else
1471                 flags = IRQF_SHARED;
1472
1473         if (bp->flags & USING_MSIX_FLAG)
1474                 irq = bp->msix_table[0].vector;
1475         else
1476                 irq = bp->pdev->irq;
1477
1478         return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev);
1479 }
1480
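/* Connect the interrupts: either request the full set of MSI-X vectors,
 * or a single vector (single MSI-X, MSI or INTx) for the whole device.
 */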
1481 static int bnx2x_setup_irqs(struct bnx2x *bp)
1482 {
1483         int rc = 0;
1484         if (bp->flags & USING_MSIX_FLAG &&
1485             !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1486                 rc = bnx2x_req_msix_irqs(bp);
1487                 if (rc)
1488                         return rc;
1489         } else {
1490                 bnx2x_ack_int(bp);
1491                 rc = bnx2x_req_irq(bp);
1492                 if (rc) {
1493                         BNX2X_ERR("IRQ request failed  rc %d, aborting\n", rc);
1494                         return rc;
1495                 }
1496                 if (bp->flags & USING_MSI_FLAG) {
1497                         bp->dev->irq = bp->pdev->irq;
1498                         netdev_info(bp->dev, "using MSI IRQ %d\n",
1499                                     bp->dev->irq);
1500                 }
1501                 if (bp->flags & USING_MSIX_FLAG) {
1502                         bp->dev->irq = bp->msix_table[0].vector;
1503                         netdev_info(bp->dev, "using MSIX IRQ %d\n",
1504                                     bp->dev->irq);
1505                 }
1506         }
1507
1508         return 0;
1509 }
1510
1511 static void bnx2x_napi_enable(struct bnx2x *bp)
1512 {
1513         int i;
1514
1515         for_each_rx_queue(bp, i)
1516                 napi_enable(&bnx2x_fp(bp, i, napi));
1517 }
1518
1519 static void bnx2x_napi_disable(struct bnx2x *bp)
1520 {
1521         int i;
1522
1523         for_each_rx_queue(bp, i)
1524                 napi_disable(&bnx2x_fp(bp, i, napi));
1525 }
1526
1527 void bnx2x_netif_start(struct bnx2x *bp)
1528 {
1529         if (netif_running(bp->dev)) {
1530                 bnx2x_napi_enable(bp);
1531                 bnx2x_int_enable(bp);
1532                 if (bp->state == BNX2X_STATE_OPEN)
1533                         netif_tx_wake_all_queues(bp->dev);
1534         }
1535 }
1536
1537 void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1538 {
1539         bnx2x_int_disable_sync(bp, disable_hw);
1540         bnx2x_napi_disable(bp);
1541 }
1542
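/* Select a Tx queue for the skb: FCoE and FIP frames are steered to the
 * dedicated FCoE ring (when CNIC is present), everything else is hashed
 * over the ETH Tx queues.
 */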
1543 u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
1544 {
1545         struct bnx2x *bp = netdev_priv(dev);
1546
1547 #ifdef BCM_CNIC
1548         if (!NO_FCOE(bp)) {
1549                 struct ethhdr *hdr = (struct ethhdr *)skb->data;
1550                 u16 ether_type = ntohs(hdr->h_proto);
1551
1552                 /* Skip VLAN tag if present */
1553                 if (ether_type == ETH_P_8021Q) {
1554                         struct vlan_ethhdr *vhdr =
1555                                 (struct vlan_ethhdr *)skb->data;
1556
1557                         ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1558                 }
1559
1560                 /* If ethertype is FCoE or FIP - use FCoE ring */
1561                 if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
1562                         return bnx2x_fcoe_tx(bp, txq_index);
1563         }
1564 #endif
1565         /* select a non-FCoE queue */
1566         return __skb_tx_hash(dev, skb, BNX2X_NUM_ETH_QUEUES(bp));
1567 }
1568
1569
1570 void bnx2x_set_num_queues(struct bnx2x *bp)
1571 {
1572         /* RSS queues */
1573         bp->num_queues = bnx2x_calc_num_queues(bp);
1574
1575 #ifdef BCM_CNIC
1576         /* override in STORAGE SD modes */
1577         if (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))
1578                 bp->num_queues = 1;
1579 #endif
1580         /* Add special queues */
1581         bp->num_queues += NON_ETH_CONTEXT_USE;
1582 }
1583
1584 /**
1585  * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
1586  *
1587  * @bp:         Driver handle
1588  *
1589  * We currently support at most 16 Tx queues for each CoS, thus we will
1590  * allocate a multiple of 16 for ETH L2 rings according to the value of
1591  * bp->max_cos.
1592  *
1593  * If there is an FCoE L2 queue the appropriate Tx queue will have the next
1594  * index after all ETH L2 indices.
1595  *
1596  * If the actual number of Tx queues (for each CoS) is less than 16 then there
1597  * will be holes at the end of each group of 16 ETH L2 indices (0..15,
1598  * 16..31, ...) with indices that are not coupled with any real Tx queue.
1599  *
1600  * The proper configuration of skb->queue_mapping is handled by
1601  * bnx2x_select_queue() and __skb_tx_hash().
1602  *
1603  * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
1604  * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
1605  */
1606 static int bnx2x_set_real_num_queues(struct bnx2x *bp)
1607 {
1608         int rc, tx, rx;
1609
1610         tx = MAX_TXQS_PER_COS * bp->max_cos;
1611         rx = BNX2X_NUM_ETH_QUEUES(bp);
1612
1613 /* account for fcoe queue */
1614 #ifdef BCM_CNIC
1615         if (!NO_FCOE(bp)) {
1616                 rx += FCOE_PRESENT;
1617                 tx += FCOE_PRESENT;
1618         }
1619 #endif
1620
1621         rc = netif_set_real_num_tx_queues(bp->dev, tx);
1622         if (rc) {
1623                 BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
1624                 return rc;
1625         }
1626         rc = netif_set_real_num_rx_queues(bp->dev, rx);
1627         if (rc) {
1628                 BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
1629                 return rc;
1630         }
1631
1632         DP(NETIF_MSG_IFUP, "Setting real num queues to (tx, rx) (%d, %d)\n",
1633                           tx, rx);
1634
1635         return rc;
1636 }
1637
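/* Derive the Rx buffer size of each queue from the device MTU (or the
 * FCoE mini-jumbo MTU), adding the FW alignment, the Ethernet overhead
 * and the IP header alignment padding.
 */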
1638 static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
1639 {
1640         int i;
1641
1642         for_each_queue(bp, i) {
1643                 struct bnx2x_fastpath *fp = &bp->fp[i];
1644                 u32 mtu;
1645
1646                 /* Always use a mini-jumbo MTU for the FCoE L2 ring */
1647                 if (IS_FCOE_IDX(i))
1648                         /*
1649                          * Although there are no IP frames expected to arrive on
1650                          * this ring, we still want to add an
1651                          * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
1652                          * overrun attack.
1653                          */
1654                         mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
1655                 else
1656                         mtu = bp->dev->mtu;
1657                 fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
1658                                   IP_HEADER_ALIGNMENT_PADDING +
1659                                   ETH_OVREHEAD +
1660                                   mtu +
1661                                   BNX2X_FW_RX_ALIGN_END;
1662                 /* Note: rx_buf_size doesn't take NET_SKB_PAD into account */
1663         }
1664 }
1665
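/* Build a default RSS indirection table spread over the ETH queues and
 * program it (and, for a PMF or a non-E1x chip, the hash keys as well)
 * via bnx2x_config_rss_eth().
 */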
1666 static int bnx2x_init_rss_pf(struct bnx2x *bp)
1667 {
1668         int i;
1669         u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0};
1670         u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
1671
1672         /* Prepare the initial contents of the indirection table if RSS is
1673          * enabled
1674          */
1675         for (i = 0; i < sizeof(ind_table); i++)
1676                 ind_table[i] =
1677                         bp->fp->cl_id +
1678                         ethtool_rxfh_indir_default(i, num_eth_queues);
1679
1680         /*
1681          * For 57710 and 57711 SEARCHER configuration (rss_keys) is
1682          * per-port, so if explicit configuration is needed, do it only
1683          * for a PMF.
1684          *
1685          * For 57712 and newer on the other hand it's a per-function
1686          * configuration.
1687          */
1688         return bnx2x_config_rss_eth(bp, ind_table,
1689                                     bp->port.pmf || !CHIP_IS_E1x(bp));
1690 }
1691
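/* Build and send the RSS configuration ramrod: regular RSS mode, IPv4/IPv6
 * TCP hashing, the supplied indirection table and, when config_hash is set,
 * freshly generated random hash keys.
 */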
1692 int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
1693                         u8 *ind_table, bool config_hash)
1694 {
1695         struct bnx2x_config_rss_params params = {NULL};
1696         int i;
1697
1698         /* Although RSS is meaningless when there is a single HW queue we
1699          * still need it enabled in order to have HW Rx hash generated.
1700          *
1701          * if (!is_eth_multi(bp))
1702          *      bp->multi_mode = ETH_RSS_MODE_DISABLED;
1703          */
1704
1705         params.rss_obj = rss_obj;
1706
1707         __set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
1708
1709         __set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
1710
1711         /* RSS configuration */
1712         __set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
1713         __set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
1714         __set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
1715         __set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
1716
1717         /* Hash bits */
1718         params.rss_result_mask = MULTI_MASK;
1719
1720         memcpy(params.ind_table, ind_table, sizeof(params.ind_table));
1721
1722         if (config_hash) {
1723                 /* RSS keys */
1724                 for (i = 0; i < sizeof(params.rss_key) / 4; i++)
1725                         params.rss_key[i] = random32();
1726
1727                 __set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
1728         }
1729
1730         return bnx2x_config_rss(bp, &params);
1731 }
1732
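/* Run the HW_INIT function state transition for the given load phase
 * (common/port/function) through the function state machine.
 */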
1733 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
1734 {
1735         struct bnx2x_func_state_params func_params = {NULL};
1736
1737         /* Prepare parameters for function state transitions */
1738         __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
1739
1740         func_params.f_obj = &bp->func_obj;
1741         func_params.cmd = BNX2X_F_CMD_HW_INIT;
1742
1743         func_params.params.hw_init.load_phase = load_code;
1744
1745         return bnx2x_func_state_change(bp, &func_params);
1746 }
1747
1748 /*
1749  * Cleans the objects that have internal lists without sending
1750  * ramrods. Should be run when interrupts are disabled.
1751  */
1752 static void bnx2x_squeeze_objects(struct bnx2x *bp)
1753 {
1754         int rc;
1755         unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
1756         struct bnx2x_mcast_ramrod_params rparam = {NULL};
1757         struct bnx2x_vlan_mac_obj *mac_obj = &bp->fp->mac_obj;
1758
1759         /***************** Cleanup MACs' object first *************************/
1760
1761         /* Wait for completion of the requested commands */
1762         __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
1763         /* Perform a dry cleanup */
1764         __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
1765
1766         /* Clean ETH primary MAC */
1767         __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
1768         rc = mac_obj->delete_all(bp, &bp->fp->mac_obj, &vlan_mac_flags,
1769                                  &ramrod_flags);
1770         if (rc != 0)
1771                 BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
1772
1773         /* Cleanup UC list */
1774         vlan_mac_flags = 0;
1775         __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
1776         rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
1777                                  &ramrod_flags);
1778         if (rc != 0)
1779                 BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
1780
1781         /***************** Now clean mcast object *****************************/
1782         rparam.mcast_obj = &bp->mcast_obj;
1783         __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
1784
1785         /* Add a DEL command... */
1786         rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
1787         if (rc < 0)
1788                 BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
1789                           rc);
1790
1791         /* ...and wait until all pending commands are cleared */
1792         rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
1793         while (rc != 0) {
1794                 if (rc < 0) {
1795                         BNX2X_ERR("Failed to clean multi-cast object: %d\n",
1796                                   rc);
1797                         return;
1798                 }
1799
1800                 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
1801         }
1802 }
1803
1804 #ifndef BNX2X_STOP_ON_ERROR
1805 #define LOAD_ERROR_EXIT(bp, label) \
1806         do { \
1807                 (bp)->state = BNX2X_STATE_ERROR; \
1808                 goto label; \
1809         } while (0)
1810 #else
1811 #define LOAD_ERROR_EXIT(bp, label) \
1812         do { \
1813                 (bp)->state = BNX2X_STATE_ERROR; \
1814                 (bp)->panic = 1; \
1815                 return -EBUSY; \
1816         } while (0)
1817 #endif
1818
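/* Compare the FW version this driver was built against with the FW that is
 * already loaded on the chip; returns true when the two match.
 */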
1819 bool bnx2x_test_firmware_version(struct bnx2x *bp, bool is_err)
1820 {
1821         /* build FW version dword */
1822         u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
1823                     (BCM_5710_FW_MINOR_VERSION << 8) +
1824                     (BCM_5710_FW_REVISION_VERSION << 16) +
1825                     (BCM_5710_FW_ENGINEERING_VERSION << 24);
1826
1827         /* read loaded FW from chip */
1828         u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
1829
1830         DP(NETIF_MSG_IFUP, "loaded fw %x, my fw %x\n", loaded_fw, my_fw);
1831
1832         if (loaded_fw != my_fw) {
1833                 if (is_err)
1834                         BNX2X_ERR("bnx2x with FW %x was already loaded, which mismatches my %x FW. aborting\n",
1835                                   loaded_fw, my_fw);
1836                 return false;
1837         }
1838
1839         return true;
1840 }
1841
1842 /**
1843  * bnx2x_bz_fp - zero content of the fastpath structure.
1844  *
1845  * @bp:         driver handle
1846  * @index:      fastpath index to be zeroed
1847  *
1848  * Makes sure the contents of the bp->fp[index].napi is kept
1849  * intact.
1850  */
1851 static void bnx2x_bz_fp(struct bnx2x *bp, int index)
1852 {
1853         struct bnx2x_fastpath *fp = &bp->fp[index];
1854         struct napi_struct orig_napi = fp->napi;
1855         /* bzero bnx2x_fastpath contents */
1856         if (bp->stats_init)
1857                 memset(fp, 0, sizeof(*fp));
1858         else {
1859                 /* Keep Queue statistics */
1860                 struct bnx2x_eth_q_stats *tmp_eth_q_stats;
1861                 struct bnx2x_eth_q_stats_old *tmp_eth_q_stats_old;
1862
1863                 tmp_eth_q_stats = kzalloc(sizeof(struct bnx2x_eth_q_stats),
1864                                           GFP_KERNEL);
1865                 if (tmp_eth_q_stats)
1866                         memcpy(tmp_eth_q_stats, &fp->eth_q_stats,
1867                                sizeof(struct bnx2x_eth_q_stats));
1868
1869                 tmp_eth_q_stats_old =
1870                         kzalloc(sizeof(struct bnx2x_eth_q_stats_old),
1871                                 GFP_KERNEL);
1872                 if (tmp_eth_q_stats_old)
1873                         memcpy(tmp_eth_q_stats_old, &fp->eth_q_stats_old,
1874                                sizeof(struct bnx2x_eth_q_stats_old));
1875
1876                 memset(fp, 0, sizeof(*fp));
1877
1878                 if (tmp_eth_q_stats) {
1879                         memcpy(&fp->eth_q_stats, tmp_eth_q_stats,
1880                                    sizeof(struct bnx2x_eth_q_stats));
1881                         kfree(tmp_eth_q_stats);
1882                 }
1883
1884                 if (tmp_eth_q_stats_old) {
1885                         memcpy(&fp->eth_q_stats_old, tmp_eth_q_stats_old,
1886                                sizeof(struct bnx2x_eth_q_stats_old));
1887                         kfree(tmp_eth_q_stats_old);
1888                 }
1889
1890         }
1891
1892         /* Restore the NAPI object as it has been already initialized */
1893         fp->napi = orig_napi;
1894
1895         fp->bp = bp;
1896         fp->index = index;
1897         if (IS_ETH_FP(fp))
1898                 fp->max_cos = bp->max_cos;
1899         else
1900                 /* Special queues support only one CoS */
1901                 fp->max_cos = 1;
1902
1903         /*
1904          * Set the TPA flag for each queue. The TPA flag determines the minimal
1905          * queue size, so it must be set prior to queue memory allocation.
1906          */
1907         fp->disable_tpa = !(bp->flags & TPA_ENABLE_FLAG ||
1908                                   (bp->flags & GRO_ENABLE_FLAG &&
1909                                    bnx2x_mtu_allows_gro(bp->dev->mtu)));
1910         if (bp->flags & TPA_ENABLE_FLAG)
1911                 fp->mode = TPA_MODE_LRO;
1912         else if (bp->flags & GRO_ENABLE_FLAG)
1913                 fp->mode = TPA_MODE_GRO;
1914
1915 #ifdef BCM_CNIC
1916         /* We don't want TPA on an FCoE L2 ring */
1917         if (IS_FCOE_FP(fp))
1918                 fp->disable_tpa = 1;
1919 #endif
1920 }
1921
1922
1923 /* must be called with rtnl_lock */
1924 int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
1925 {
1926         int port = BP_PORT(bp);
1927         u32 load_code;
1928         int i, rc;
1929
1930 #ifdef BNX2X_STOP_ON_ERROR
1931         if (unlikely(bp->panic)) {
1932                 BNX2X_ERR("Can't load NIC when there is panic\n");
1933                 return -EPERM;
1934         }
1935 #endif
1936
1937         bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
1938
1939         /* Set the initial link reported state to link down */
1940         bnx2x_acquire_phy_lock(bp);
1941         memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
1942         __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1943                 &bp->last_reported_link.link_report_flags);
1944         bnx2x_release_phy_lock(bp);
1945
1946         /* must be called before memory allocation and HW init */
1947         bnx2x_ilt_set_info(bp);
1948
1949         /*
1950          * Zero fastpath structures preserving invariants like napi, which are
1951          * allocated only once, fp index, max_cos, bp pointer.
1952          * Also set fp->disable_tpa.
1953          */
1954         DP(NETIF_MSG_IFUP, "num queues: %d\n", bp->num_queues);
1955         for_each_queue(bp, i)
1956                 bnx2x_bz_fp(bp, i);
1957
1958
1959         /* Set the receive queues buffer size */
1960         bnx2x_set_rx_buf_size(bp);
1961
1962         if (bnx2x_alloc_mem(bp))
1963                 return -ENOMEM;
1964
1965         /* As long as bnx2x_alloc_mem() may possibly update
1966          * bp->num_queues, bnx2x_set_real_num_queues() should always
1967          * come after it.
1968          */
1969         rc = bnx2x_set_real_num_queues(bp);
1970         if (rc) {
1971                 BNX2X_ERR("Unable to set real_num_queues\n");
1972                 LOAD_ERROR_EXIT(bp, load_error0);
1973         }
1974
1975         /* Configure multi-CoS mappings in the kernel.
1976          * This configuration may be overridden by a multi-class queue discipline
1977          * or by a DCBX negotiation result.
1978          */
1979         bnx2x_setup_tc(bp->dev, bp->max_cos);
1980
1981         bnx2x_napi_enable(bp);
1982
1983         /* set pf load just before approaching the MCP */
1984         bnx2x_set_pf_load(bp);
1985
1986         /* Send LOAD_REQUEST command to MCP
1987          * Returns the type of LOAD command:
1988          * if it is the first port to be initialized
1989          * common blocks should be initialized, otherwise - not
1990          */
1991         if (!BP_NOMCP(bp)) {
1992                 /* init fw_seq */
1993                 bp->fw_seq =
1994                         (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
1995                          DRV_MSG_SEQ_NUMBER_MASK);
1996                 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
1997
1998                 /* Get current FW pulse sequence */
1999                 bp->fw_drv_pulse_wr_seq =
2000                         (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
2001                          DRV_PULSE_SEQ_MASK);
2002                 BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
2003
2004                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0);
2005                 if (!load_code) {
2006                         BNX2X_ERR("MCP response failure, aborting\n");
2007                         rc = -EBUSY;
2008                         LOAD_ERROR_EXIT(bp, load_error1);
2009                 }
2010                 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
2011                         BNX2X_ERR("Driver load refused\n");
2012                         rc = -EBUSY; /* other port in diagnostic mode */
2013                         LOAD_ERROR_EXIT(bp, load_error1);
2014                 }
2015                 if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
2016                     load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
2017                         /* abort nic load if version mismatch */
2018                         if (!bnx2x_test_firmware_version(bp, true)) {
2019                                 rc = -EBUSY;
2020                                 LOAD_ERROR_EXIT(bp, load_error2);
2021                         }
2022                 }
2023
2024         } else {
2025                 int path = BP_PATH(bp);
2026
2027                 DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d]      %d, %d, %d\n",
2028                    path, load_count[path][0], load_count[path][1],
2029                    load_count[path][2]);
2030                 load_count[path][0]++;
2031                 load_count[path][1 + port]++;
2032                 DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d]  %d, %d, %d\n",
2033                    path, load_count[path][0], load_count[path][1],
2034                    load_count[path][2]);
2035                 if (load_count[path][0] == 1)
2036                         load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
2037                 else if (load_count[path][1 + port] == 1)
2038                         load_code = FW_MSG_CODE_DRV_LOAD_PORT;
2039                 else
2040                         load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
2041         }
2042
2043         if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2044             (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
2045             (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
2046                 bp->port.pmf = 1;
2047                 /*
2048                  * We need the barrier to ensure the ordering between the
2049                  * writing to bp->port.pmf here and reading it from the
2050                  * bnx2x_periodic_task().
2051                  */
2052                 smp_mb();
2053         } else
2054                 bp->port.pmf = 0;
2055
2056         DP(NETIF_MSG_IFUP, "pmf %d\n", bp->port.pmf);
2057
2058         /* Init Function state controlling object */
2059         bnx2x__init_func_obj(bp);
2060
2061         /* Initialize HW */
2062         rc = bnx2x_init_hw(bp, load_code);
2063         if (rc) {
2064                 BNX2X_ERR("HW init failed, aborting\n");
2065                 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2066                 LOAD_ERROR_EXIT(bp, load_error2);
2067         }
2068
2069         /* Connect to IRQs */
2070         rc = bnx2x_setup_irqs(bp);
2071         if (rc) {
2072                 BNX2X_ERR("IRQs setup failed\n");
2073                 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2074                 LOAD_ERROR_EXIT(bp, load_error2);
2075         }
2076
2077         /* Setup NIC internals and enable interrupts */
2078         bnx2x_nic_init(bp, load_code);
2079
2080         /* Init per-function objects */
2081         bnx2x_init_bp_objs(bp);
2082
2083         if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2084             (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
2085             (bp->common.shmem2_base)) {
2086                 if (SHMEM2_HAS(bp, dcc_support))
2087                         SHMEM2_WR(bp, dcc_support,
2088                                   (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
2089                                    SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
2090                 if (SHMEM2_HAS(bp, afex_driver_support))
2091                         SHMEM2_WR(bp, afex_driver_support,
2092                                   SHMEM_AFEX_SUPPORTED_VERSION_ONE);
2093         }
2094
2095         /* Set AFEX default VLAN tag to an invalid value */
2096         bp->afex_def_vlan_tag = -1;
2097
2098         bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
2099         rc = bnx2x_func_start(bp);
2100         if (rc) {
2101                 BNX2X_ERR("Function start failed!\n");
2102                 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2103                 LOAD_ERROR_EXIT(bp, load_error3);
2104         }
2105
2106         /* Send LOAD_DONE command to MCP */
2107         if (!BP_NOMCP(bp)) {
2108                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2109                 if (!load_code) {
2110                         BNX2X_ERR("MCP response failure, aborting\n");
2111                         rc = -EBUSY;
2112                         LOAD_ERROR_EXIT(bp, load_error3);
2113                 }
2114         }
2115
2116         rc = bnx2x_setup_leading(bp);
2117         if (rc) {
2118                 BNX2X_ERR("Setup leading failed!\n");
2119                 LOAD_ERROR_EXIT(bp, load_error3);
2120         }
2121
2122 #ifdef BCM_CNIC
2123         /* Enable Timer scan */
2124         REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
2125 #endif
2126
2127         for_each_nondefault_queue(bp, i) {
2128                 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
2129                 if (rc) {
2130                         BNX2X_ERR("Queue setup failed\n");
2131                         LOAD_ERROR_EXIT(bp, load_error4);
2132                 }
2133         }
2134
2135         rc = bnx2x_init_rss_pf(bp);
2136         if (rc) {
2137                 BNX2X_ERR("PF RSS init failed\n");
2138                 LOAD_ERROR_EXIT(bp, load_error4);
2139         }
2140
2141         /* Now that clients are configured we are ready to work */
2142         bp->state = BNX2X_STATE_OPEN;
2143
2144         /* Configure a ucast MAC */
2145         rc = bnx2x_set_eth_mac(bp, true);
2146         if (rc) {
2147                 BNX2X_ERR("Setting Ethernet MAC failed\n");
2148                 LOAD_ERROR_EXIT(bp, load_error4);
2149         }
2150
2151         if (bp->pending_max) {
2152                 bnx2x_update_max_mf_config(bp, bp->pending_max);
2153                 bp->pending_max = 0;
2154         }
2155
2156         if (bp->port.pmf)
2157                 bnx2x_initial_phy_init(bp, load_mode);
2158
2159         /* Start fast path */
2160
2161         /* Initialize Rx filter. */
2162         netif_addr_lock_bh(bp->dev);
2163         bnx2x_set_rx_mode(bp->dev);
2164         netif_addr_unlock_bh(bp->dev);
2165
2166         /* Start the Tx */
2167         switch (load_mode) {
2168         case LOAD_NORMAL:
2169                 /* Tx queues should only be re-enabled */
2170                 netif_tx_wake_all_queues(bp->dev);
2171                 break;
2172
2173         case LOAD_OPEN:
2174                 netif_tx_start_all_queues(bp->dev);
2175                 smp_mb__after_clear_bit();
2176                 break;
2177
2178         case LOAD_DIAG:
2179                 bp->state = BNX2X_STATE_DIAG;
2180                 break;
2181
2182         default:
2183                 break;
2184         }
2185
2186         if (bp->port.pmf)
2187                 bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_DCB_CONFIGURED, 0);
2188         else
2189                 bnx2x__link_status_update(bp);
2190
2191         /* start the timer */
2192         mod_timer(&bp->timer, jiffies + bp->current_interval);
2193
2194 #ifdef BCM_CNIC
2195         /* re-read iscsi info */
2196         bnx2x_get_iscsi_info(bp);
2197         bnx2x_setup_cnic_irq_info(bp);
2198         if (bp->state == BNX2X_STATE_OPEN)
2199                 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
2200 #endif
2201
2202         /* mark driver is loaded in shmem2 */
2203         if (SHMEM2_HAS(bp, drv_capabilities_flag)) {
2204                 u32 val;
2205                 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2206                 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2207                           val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
2208                           DRV_FLAGS_CAPABILITIES_LOADED_L2);
2209         }
2210
2211         /* Wait for all pending SP commands to complete */
2212         if (!bnx2x_wait_sp_comp(bp, ~0x0UL)) {
2213                 BNX2X_ERR("Timeout waiting for SP elements to complete\n");
2214                 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
2215                 return -EBUSY;
2216         }
2217
2218         bnx2x_dcbx_init(bp);
2219         return 0;
2220
2221 #ifndef BNX2X_STOP_ON_ERROR
2222 load_error4:
2223 #ifdef BCM_CNIC
2224         /* Disable Timer scan */
2225         REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
2226 #endif
2227 load_error3:
2228         bnx2x_int_disable_sync(bp, 1);
2229
2230         /* Clean queueable objects */
2231         bnx2x_squeeze_objects(bp);
2232
2233         /* Free SKBs, SGEs, TPA pool and driver internals */
2234         bnx2x_free_skbs(bp);
2235         for_each_rx_queue(bp, i)
2236                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2237
2238         /* Release IRQs */
2239         bnx2x_free_irq(bp);
2240 load_error2:
2241         if (!BP_NOMCP(bp)) {
2242                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
2243                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
2244         }
2245
2246         bp->port.pmf = 0;
2247 load_error1:
2248         bnx2x_napi_disable(bp);
2249         /* clear pf_load status, as it was already set */
2250         bnx2x_clear_pf_load(bp);
2251 load_error0:
2252         bnx2x_free_mem(bp);
2253
2254         return rc;
2255 #endif /* ! BNX2X_STOP_ON_ERROR */
2256 }
2257
2258 /* must be called with rtnl_lock */
2259 int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
2260 {
2261         int i;
2262         bool global = false;
2263
2264         /* mark driver is unloaded in shmem2 */
2265         if (SHMEM2_HAS(bp, drv_capabilities_flag)) {
2266                 u32 val;
2267                 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2268                 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2269                           val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
2270         }
2271
2272         if ((bp->state == BNX2X_STATE_CLOSED) ||
2273             (bp->state == BNX2X_STATE_ERROR)) {
2274                 /* We can get here if the driver has been unloaded
2275                  * during parity error recovery and is either waiting for a
2276                  * leader to complete or for other functions to unload and
2277                  * then ifdown has been issued. In this case we want to
2278                  * unload and let the other functions complete the recovery
2279                  * process.
2280                  */
2281                 bp->recovery_state = BNX2X_RECOVERY_DONE;
2282                 bp->is_leader = 0;
2283                 bnx2x_release_leader_lock(bp);
2284                 smp_mb();
2285
2286                 DP(NETIF_MSG_IFDOWN, "Releasing a leadership...\n");
2287                 BNX2X_ERR("Can't unload in closed or error state\n");
2288                 return -EINVAL;
2289         }
2290
2291         /*
2292          * It's important to set the bp->state to the value different from
2293          * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
2294          * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
2295          */
2296         bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
2297         smp_mb();
2298
2299         /* Stop Tx */
2300         bnx2x_tx_disable(bp);
2301
2302 #ifdef BCM_CNIC
2303         bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
2304 #endif
2305
2306         bp->rx_mode = BNX2X_RX_MODE_NONE;
2307
2308         del_timer_sync(&bp->timer);
2309
2310         /* Set ALWAYS_ALIVE bit in shmem */
2311         bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
2312
2313         bnx2x_drv_pulse(bp);
2314
2315         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2316         bnx2x_save_statistics(bp);
2317
2318         /* Cleanup the chip if needed */
2319         if (unload_mode != UNLOAD_RECOVERY)
2320                 bnx2x_chip_cleanup(bp, unload_mode);
2321         else {
2322                 /* Send the UNLOAD_REQUEST to the MCP */
2323                 bnx2x_send_unload_req(bp, unload_mode);
2324
2325                 /*
2326                  * Prevent transactions to host from the functions on the
2327                  * engine that doesn't reset global blocks in case of global
2328                  * attention once global blocks are reset and gates are opened
2329                  * (the engine whose leader will perform the recovery
2330                  * last).
2331                  */
2332                 if (!CHIP_IS_E1x(bp))
2333                         bnx2x_pf_disable(bp);
2334
2335                 /* Disable HW interrupts, NAPI */
2336                 bnx2x_netif_stop(bp, 1);
2337
2338                 /* Release IRQs */
2339                 bnx2x_free_irq(bp);
2340
2341                 /* Report UNLOAD_DONE to MCP */
2342                 bnx2x_send_unload_done(bp);
2343         }
2344
2345         /*
2346          * At this stage no more interrupts will arrive, so we may safely clean
2347          * the queueable objects here in case they failed to get cleaned so far.
2348          */
2349         bnx2x_squeeze_objects(bp);
2350
2351         /* There should be no more pending SP commands at this stage */
2352         bp->sp_state = 0;
2353
2354         bp->port.pmf = 0;
2355
2356         /* Free SKBs, SGEs, TPA pool and driver internals */
2357         bnx2x_free_skbs(bp);
2358         for_each_rx_queue(bp, i)
2359                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2360
2361         bnx2x_free_mem(bp);
2362
2363         bp->state = BNX2X_STATE_CLOSED;
2364
2365         /* Check if there are pending parity attentions. If there are - set
2366          * RECOVERY_IN_PROGRESS.
2367          */
2368         if (bnx2x_chk_parity_attn(bp, &global, false)) {
2369                 bnx2x_set_reset_in_progress(bp);
2370
2371                 /* Set RESET_IS_GLOBAL if needed */
2372                 if (global)
2373                         bnx2x_set_reset_global(bp);
2374         }
2375
2376
2377         /* The last driver must disable a "close the gate" if there is no
2378          * parity attention or "process kill" pending.
2379          */
2380         if (!bnx2x_clear_pf_load(bp) && bnx2x_reset_is_done(bp, BP_PATH(bp)))
2381                 bnx2x_disable_close_the_gate(bp);
2382
2383         return 0;
2384 }
2385
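/* Program the PCI PM control register to move the device into D0 or D3hot,
 * honoring WoL and skipping the power-down on emulation/FPGA or while other
 * clients still have the device enabled.
 */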
2386 int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
2387 {
2388         u16 pmcsr;
2389
2390         /* If there is no power capability, silently succeed */
2391         if (!bp->pm_cap) {
2392                 BNX2X_DEV_INFO("No power capability. Breaking.\n");
2393                 return 0;
2394         }
2395
2396         pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
2397
2398         switch (state) {
2399         case PCI_D0:
2400                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2401                                       ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
2402                                        PCI_PM_CTRL_PME_STATUS));
2403
2404                 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
2405                         /* delay required during transition out of D3hot */
2406                         msleep(20);
2407                 break;
2408
2409         case PCI_D3hot:
2410                 /* If there are other clients above, don't
2411                    shut down the power */
2412                 if (atomic_read(&bp->pdev->enable_cnt) != 1)
2413                         return 0;
2414                 /* Don't shut down the power for emulation and FPGA */
2415                 if (CHIP_REV_IS_SLOW(bp))
2416                         return 0;
2417
2418                 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
2419                 pmcsr |= 3;
2420
2421                 if (bp->wol)
2422                         pmcsr |= PCI_PM_CTRL_PME_ENABLE;
2423
2424                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2425                                       pmcsr);
2426
2427                 /* No more memory access after this point until
2428                 * device is brought back to D0.
2429                 */
2430                 break;
2431
2432         default:
2433                 dev_err(&bp->pdev->dev, "Can't support state = %d\n", state);
2434                 return -EINVAL;
2435         }
2436         return 0;
2437 }
2438
2439 /*
2440  * net_device service functions
2441  */
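/* NAPI poll callback: service Tx completions on every CoS, then Rx work up
 * to the budget, and re-enable the status block interrupt only when no more
 * work is pending.
 */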
2442 int bnx2x_poll(struct napi_struct *napi, int budget)
2443 {
2444         int work_done = 0;
2445         u8 cos;
2446         struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
2447                                                  napi);
2448         struct bnx2x *bp = fp->bp;
2449
2450         while (1) {
2451 #ifdef BNX2X_STOP_ON_ERROR
2452                 if (unlikely(bp->panic)) {
2453                         napi_complete(napi);
2454                         return 0;
2455                 }
2456 #endif
2457
2458                 for_each_cos_in_tx_queue(fp, cos)
2459                         if (bnx2x_tx_queue_has_work(&fp->txdata[cos]))
2460                                 bnx2x_tx_int(bp, &fp->txdata[cos]);
2461
2462
2463                 if (bnx2x_has_rx_work(fp)) {
2464                         work_done += bnx2x_rx_int(fp, budget - work_done);
2465
2466                         /* must not complete if we consumed full budget */
2467                         if (work_done >= budget)
2468                                 break;
2469                 }
2470
2471                 /* Fall out from the NAPI loop if needed */
2472                 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
2473 #ifdef BCM_CNIC
2474                         /* No need to update SB for FCoE L2 ring as long as
2475                          * it's connected to the default SB and the SB
2476                          * has been updated when NAPI was scheduled.
2477                          */
2478                         if (IS_FCOE_FP(fp)) {
2479                                 napi_complete(napi);
2480                                 break;
2481                         }
2482 #endif
2483
2484                         bnx2x_update_fpsb_idx(fp);
2485                         /* bnx2x_has_rx_work() reads the status block,
2486                          * thus we need to ensure that status block indices
2487                          * have been actually read (bnx2x_update_fpsb_idx)
2488                          * prior to this check (bnx2x_has_rx_work) so that
2489                          * we won't write the "newer" value of the status block
2490                          * to IGU (if there was a DMA right after
2491                          * bnx2x_has_rx_work and if there is no rmb, the memory
2492                          * reading (bnx2x_update_fpsb_idx) may be postponed
2493                          * to right before bnx2x_ack_sb). In this case there
2494                          * will never be another interrupt until there is
2495                          * another update of the status block, while there
2496                          * is still unhandled work.
2497                          */
2498                         rmb();
2499
2500                         if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
2501                                 napi_complete(napi);
2502                                 /* Re-enable interrupts */
2503                                 DP(NETIF_MSG_RX_STATUS,
2504                                    "Update index to %d\n", fp->fp_hc_idx);
2505                                 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
2506                                              le16_to_cpu(fp->fp_hc_idx),
2507                                              IGU_INT_ENABLE, 1);
2508                                 break;
2509                         }
2510                 }
2511         }
2512
2513         return work_done;
2514 }
2515
2516 /* We split the first BD into header and data BDs
2517  * to ease the pain of our fellow microcode engineers;
2518  * we use one mapping for both BDs.
2519  * So far this has only been observed to happen
2520  * in Other Operating Systems(TM).
2521  */
2522 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
2523                                    struct bnx2x_fp_txdata *txdata,
2524                                    struct sw_tx_bd *tx_buf,
2525                                    struct eth_tx_start_bd **tx_bd, u16 hlen,
2526                                    u16 bd_prod, int nbd)
2527 {
2528         struct eth_tx_start_bd *h_tx_bd = *tx_bd;
2529         struct eth_tx_bd *d_tx_bd;
2530         dma_addr_t mapping;
2531         int old_len = le16_to_cpu(h_tx_bd->nbytes);
2532
2533         /* first fix first BD */
2534         h_tx_bd->nbd = cpu_to_le16(nbd);
2535         h_tx_bd->nbytes = cpu_to_le16(hlen);
2536
2537         DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x) nbd %d\n",
2538            h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo, h_tx_bd->nbd);
2539
2540         /* now get a new data BD
2541          * (after the pbd) and fill it */
2542         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2543         d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
2544
2545         mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
2546                            le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
2547
2548         d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2549         d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
2550         d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
2551
2552         /* this marks the BD as one that has no individual mapping */
2553         tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
2554
2555         DP(NETIF_MSG_TX_QUEUED,
2556            "TSO split data size is %d (%x:%x)\n",
2557            d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
2558
2559         /* update tx_bd */
2560         *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
2561
2562         return bd_prod;
2563 }
2564
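/* Fold the bytes between the stack's checksum start and the transport header
 * into (or out of) the checksum, according to the signed 'fix' offset, and
 * return the result byte-swapped for the HW.
 */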
2565 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
2566 {
2567         if (fix > 0)
2568                 csum = (u16) ~csum_fold(csum_sub(csum,
2569                                 csum_partial(t_header - fix, fix, 0)));
2570
2571         else if (fix < 0)
2572                 csum = (u16) ~csum_fold(csum_add(csum,
2573                                 csum_partial(t_header, -fix, 0)));
2574
2575         return swab16(csum);
2576 }
2577
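/* Classify the skb into XMIT_* flags: plain vs. IPv4/IPv6 checksum offload,
 * TCP, and the GSO variants.
 */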
2578 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
2579 {
2580         u32 rc;
2581
2582         if (skb->ip_summed != CHECKSUM_PARTIAL)
2583                 rc = XMIT_PLAIN;
2584
2585         else {
2586                 if (vlan_get_protocol(skb) == htons(ETH_P_IPV6)) {
2587                         rc = XMIT_CSUM_V6;
2588                         if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
2589                                 rc |= XMIT_CSUM_TCP;
2590
2591                 } else {
2592                         rc = XMIT_CSUM_V4;
2593                         if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2594                                 rc |= XMIT_CSUM_TCP;
2595                 }
2596         }
2597
2598         if (skb_is_gso_v6(skb))
2599                 rc |= XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6;
2600         else if (skb_is_gso(skb))
2601                 rc |= XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP;
2602
2603         return rc;
2604 }
2605
2606 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
2607 /* Check if the packet requires linearization (packet is too fragmented).
2608    There is no need to check fragmentation if page size > 8K (there will be
2609    no violation of FW restrictions). */
2610 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
2611                              u32 xmit_type)
2612 {
2613         int to_copy = 0;
2614         int hlen = 0;
2615         int first_bd_sz = 0;
2616
2617         /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
2618         if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
2619
2620                 if (xmit_type & XMIT_GSO) {
2621                         unsigned short lso_mss = skb_shinfo(skb)->gso_size;
2622                         /* Check if LSO packet needs to be copied:
2623                            3 = 1 (for headers BD) + 2 (for PBD and last BD) */
2624                         int wnd_size = MAX_FETCH_BD - 3;
2625                         /* Number of windows to check */
2626                         int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
2627                         int wnd_idx = 0;
2628                         int frag_idx = 0;
2629                         u32 wnd_sum = 0;
2630
2631                         /* Headers length */
2632                         hlen = (int)(skb_transport_header(skb) - skb->data) +
2633                                 tcp_hdrlen(skb);
2634
2635                         /* Amount of data (w/o headers) in the linear part of the SKB */
2636                         first_bd_sz = skb_headlen(skb) - hlen;
2637
2638                         wnd_sum  = first_bd_sz;
2639
2640                         /* Calculate the first sum - it's special */
2641                         for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
2642                                 wnd_sum +=
2643                                         skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);
2644
2645                         /* If there was data in the linear part of the skb - check it */
2646                         if (first_bd_sz > 0) {
2647                                 if (unlikely(wnd_sum < lso_mss)) {
2648                                         to_copy = 1;
2649                                         goto exit_lbl;
2650                                 }
2651
2652                                 wnd_sum -= first_bd_sz;
2653                         }
2654
2655                         /* Others are easier: run through the frag list and
2656                            check all windows */
2657                         for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
2658                                 wnd_sum +=
2659                           skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);
2660
2661                                 if (unlikely(wnd_sum < lso_mss)) {
2662                                         to_copy = 1;
2663                                         break;
2664                                 }
2665                                 wnd_sum -=
2666                                         skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
2667                         }
2668                 } else {
2669                         /* in the non-LSO case a too fragmented packet should
2670                            always be linearized */
2671                         to_copy = 1;
2672                 }
2673         }
2674
2675 exit_lbl:
2676         if (unlikely(to_copy))
2677                 DP(NETIF_MSG_TX_QUEUED,
2678                    "Linearization IS REQUIRED for %s packet. num_frags %d  hlen %d  first_bd_sz %d\n",
2679                    (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
2680                    skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
2681
2682         return to_copy;
2683 }
2684 #endif
2685
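/* Store the GSO MSS in the E2 parsing data and mark IPv6 packets that carry
 * extension headers.
 */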
2686 static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
2687                                         u32 xmit_type)
2688 {
2689         *parsing_data |= (skb_shinfo(skb)->gso_size <<
2690                               ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
2691                               ETH_TX_PARSE_BD_E2_LSO_MSS;
2692         if ((xmit_type & XMIT_GSO_V6) &&
2693             (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
2694                 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
2695 }
2696
2697 /**
2698  * bnx2x_set_pbd_gso - update PBD in GSO case.
2699  *
2700  * @skb:        packet skb
2701  * @pbd:        parse BD
2702  * @xmit_type:  xmit flags
2703  */
2704 static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
2705                                      struct eth_tx_parse_bd_e1x *pbd,
2706                                      u32 xmit_type)
2707 {
2708         pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2709         pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
2710         pbd->tcp_flags = pbd_tcp_flags(skb);
2711
2712         if (xmit_type & XMIT_GSO_V4) {
2713                 pbd->ip_id = swab16(ip_hdr(skb)->id);
2714                 pbd->tcp_pseudo_csum =
2715                         swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
2716                                                   ip_hdr(skb)->daddr,
2717                                                   0, IPPROTO_TCP, 0));
2718
2719         } else
2720                 pbd->tcp_pseudo_csum =
2721                         swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2722                                                 &ipv6_hdr(skb)->daddr,
2723                                                 0, IPPROTO_TCP, 0));
2724
2725         pbd->global_data |= ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN;
2726 }
2727
2728 /**
2729  * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
2730  *
2731  * @bp:                 driver handle
2732  * @skb:                packet skb
2733  * @parsing_data:       data to be updated
2734  * @xmit_type:          xmit flags
2735  *
2736  * 57712 related
2737  */
2738 static inline  u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
2739         u32 *parsing_data, u32 xmit_type)
2740 {
2741         *parsing_data |=
2742                         ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
2743                         ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) &
2744                         ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W;
2745
2746         if (xmit_type & XMIT_CSUM_TCP) {
2747                 *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
2748                         ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
2749                         ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
2750
2751                 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
2752         } else
2753                 /* We support checksum offload for TCP and UDP only.
2754                  * No need to pass the UDP header length - it's a constant.
2755                  */
2756                 return skb_transport_header(skb) +
2757                                 sizeof(struct udphdr) - skb->data;
2758 }
2759
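/* Set the L4 and IP/IPv6 checksum offload flags in the start BD according to
 * the xmit type.
 */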
2760 static inline void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
2761         struct eth_tx_start_bd *tx_start_bd, u32 xmit_type)
2762 {
2763         tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
2764
2765         if (xmit_type & XMIT_CSUM_V4)
2766                 tx_start_bd->bd_flags.as_bitfield |=
2767                                         ETH_TX_BD_FLAGS_IP_CSUM;
2768         else
2769                 tx_start_bd->bd_flags.as_bitfield |=
2770                                         ETH_TX_BD_FLAGS_IPV6;
2771
2772         if (!(xmit_type & XMIT_CSUM_TCP))
2773                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
2774 }
2775
2776 /**
2777  * bnx2x_set_pbd_csum - update PBD with checksum and return header length
2778  *
2779  * @bp:         driver handle
2780  * @skb:        packet skb
2781  * @pbd:        parse BD to be updated
2782  * @xmit_type:  xmit flags
2783  */
2784 static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
2785         struct eth_tx_parse_bd_e1x *pbd,
2786         u32 xmit_type)
2787 {
2788         u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
2789
2790         /* for now NS flag is not used in Linux */
2791         pbd->global_data =
2792                 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
2793                          ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
2794
2795         pbd->ip_hlen_w = (skb_transport_header(skb) -
2796                         skb_network_header(skb)) >> 1;
2797
2798         hlen += pbd->ip_hlen_w;
2799
2800         /* We support checksum offload for TCP and UDP only */
2801         if (xmit_type & XMIT_CSUM_TCP)
2802                 hlen += tcp_hdrlen(skb) / 2;
2803         else
2804                 hlen += sizeof(struct udphdr) / 2;
2805
2806         pbd->total_hlen_w = cpu_to_le16(hlen);
2807         hlen = hlen*2;
2808
2809         if (xmit_type & XMIT_CSUM_TCP) {
2810                 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
2811
2812         } else {
2813                 s8 fix = SKB_CS_OFF(skb); /* signed! */
2814
2815                 DP(NETIF_MSG_TX_QUEUED,
2816                    "hlen %d  fix %d  csum before fix %x\n",
2817                    le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
2818
2819                 /* HW bug: fixup the CSUM */
2820                 pbd->tcp_pseudo_csum =
2821                         bnx2x_csum_fix(skb_transport_header(skb),
2822                                        SKB_CS(skb), fix);
2823
2824                 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
2825                    pbd->tcp_pseudo_csum);
2826         }
2827
2828         return hlen;
2829 }
2830
2831 /* called with netif_tx_lock
2832  * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
2833  * netif_wake_queue()
2834  */
2835 netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
2836 {
2837         struct bnx2x *bp = netdev_priv(dev);
2838
2839         struct bnx2x_fastpath *fp;
2840         struct netdev_queue *txq;
2841         struct bnx2x_fp_txdata *txdata;
2842         struct sw_tx_bd *tx_buf;
2843         struct eth_tx_start_bd *tx_start_bd, *first_bd;
2844         struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
2845         struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
2846         struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
2847         u32 pbd_e2_parsing_data = 0;
2848         u16 pkt_prod, bd_prod;
2849         int nbd, txq_index, fp_index, txdata_index;
2850         dma_addr_t mapping;
2851         u32 xmit_type = bnx2x_xmit_type(bp, skb);
2852         int i;
2853         u8 hlen = 0;
2854         __le16 pkt_size = 0;
2855         struct ethhdr *eth;
2856         u8 mac_type = UNICAST_ADDRESS;
2857
2858 #ifdef BNX2X_STOP_ON_ERROR
2859         if (unlikely(bp->panic))
2860                 return NETDEV_TX_BUSY;
2861 #endif
2862
2863         txq_index = skb_get_queue_mapping(skb);
2864         txq = netdev_get_tx_queue(dev, txq_index);
2865
2866         BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + FCOE_PRESENT);
2867
2868         /* decode the fastpath index and the cos index from the txq */
2869         fp_index = TXQ_TO_FP(txq_index);
2870         txdata_index = TXQ_TO_COS(txq_index);
2871
2872 #ifdef BCM_CNIC
2873         /*
2874          * Override the above for the FCoE queue:
2875          *   - FCoE fp entry is right after the ETH entries.
2876          *   - FCoE L2 queue uses bp->txdata[0] only.
2877          */
2878         if (unlikely(!NO_FCOE(bp) && (txq_index ==
2879                                       bnx2x_fcoe_tx(bp, txq_index)))) {
2880                 fp_index = FCOE_IDX;
2881                 txdata_index = 0;
2882         }
2883 #endif
2884
2885         /* enable this debug print to view the transmission queue being used
2886         DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n",
2887            txq_index, fp_index, txdata_index); */
2888
2889         /* locate the fastpath and the txdata */
2890         fp = &bp->fp[fp_index];
2891         txdata = &fp->txdata[txdata_index];
2892
2893         /* enable this debug print to view the transmission details
2894         DP(NETIF_MSG_TX_QUEUED,
2895            "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n",
2896            txdata->cid, fp_index, txdata_index, txdata, fp); */
2897
2898         if (unlikely(bnx2x_tx_avail(bp, txdata) <
2899                      (skb_shinfo(skb)->nr_frags + 3))) {
2900                 fp->eth_q_stats.driver_xoff++;
2901                 netif_tx_stop_queue(txq);
2902                 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
2903                 return NETDEV_TX_BUSY;
2904         }
2905
2906         DP(NETIF_MSG_TX_QUEUED,
2907            "queue[%d]: SKB: summed %x  protocol %x protocol(%x,%x) gso type %x  xmit_type %x\n",
2908            txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
2909            ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
2910
2911         eth = (struct ethhdr *)skb->data;
2912
2913         /* set flag according to packet type (UNICAST_ADDRESS is default)*/
2914         if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
2915                 if (is_broadcast_ether_addr(eth->h_dest))
2916                         mac_type = BROADCAST_ADDRESS;
2917                 else
2918                         mac_type = MULTICAST_ADDRESS;
2919         }
2920
2921 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
2922         /* First, check if we need to linearize the skb (due to FW
2923            restrictions). No need to check fragmentation if page size > 8K
2924            (there will be no violation of FW restrictions) */
2925         if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
2926                 /* Statistics of linearization */
2927                 bp->lin_cnt++;
2928                 if (skb_linearize(skb) != 0) {
2929                         DP(NETIF_MSG_TX_QUEUED,
2930                            "SKB linearization failed - silently dropping this SKB\n");
2931                         dev_kfree_skb_any(skb);
2932                         return NETDEV_TX_OK;
2933                 }
2934         }
2935 #endif
2936         /* Map skb linear data for DMA */
2937         mapping = dma_map_single(&bp->pdev->dev, skb->data,
2938                                  skb_headlen(skb), DMA_TO_DEVICE);
2939         if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
2940                 DP(NETIF_MSG_TX_QUEUED,
2941                    "SKB mapping failed - silently dropping this SKB\n");
2942                 dev_kfree_skb_any(skb);
2943                 return NETDEV_TX_OK;
2944         }
2945         /*
2946         Please read carefully. First we use one BD which we mark as start,
2947         then we have a parsing info BD (used for TSO or xsum),
2948         and only then we have the rest of the TSO BDs.
2949         (don't forget to mark the last one as last,
2950         and to unmap only AFTER you write to the BD ...)
2951         And above all, all pbd sizes are in words - NOT DWORDS!
2952         */
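
        /* Rough sketch (illustrative only) of the resulting BD chain for a
         * checksummed or TSO packet:
         *
         *   start BD -> parse BD -> (optional split data BD) -> frag BDs,
         *   with the last BD marked as such before the doorbell is rung.
         */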
2953
2954         /* get current pkt produced now - advance it just before sending packet
2955          * since mapping of pages may fail and cause packet to be dropped
2956          */
2957         pkt_prod = txdata->tx_pkt_prod;
2958         bd_prod = TX_BD(txdata->tx_bd_prod);
2959
2960         /* get a tx_buf and first BD
2961          * tx_start_bd may be changed during SPLIT,
2962          * but first_bd will always stay first
2963          */
2964         tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
2965         tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
2966         first_bd = tx_start_bd;
2967
2968         tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
2969         SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_ETH_ADDR_TYPE,
2970                  mac_type);
2971
2972         /* header nbd */
2973         SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1);
2974
2975         /* remember the first BD of the packet */
2976         tx_buf->first_bd = txdata->tx_bd_prod;
2977         tx_buf->skb = skb;
2978         tx_buf->flags = 0;
2979
2980         DP(NETIF_MSG_TX_QUEUED,
2981            "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
2982            pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
2983
2984         if (vlan_tx_tag_present(skb)) {
2985                 tx_start_bd->vlan_or_ethertype =
2986                     cpu_to_le16(vlan_tx_tag_get(skb));
2987                 tx_start_bd->bd_flags.as_bitfield |=
2988                     (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
2989         } else
2990                 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
2991
2992         /* turn on parsing and get a BD */
2993         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2994
2995         if (xmit_type & XMIT_CSUM)
2996                 bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
2997
2998         if (!CHIP_IS_E1x(bp)) {
2999                 pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
3000                 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
3001                 /* Set PBD in checksum offload case */
3002                 if (xmit_type & XMIT_CSUM)
3003                         hlen = bnx2x_set_pbd_csum_e2(bp, skb,
3004                                                      &pbd_e2_parsing_data,
3005                                                      xmit_type);
3006                 if (IS_MF_SI(bp)) {
3007                         /*
3008                          * fill in the MAC addresses in the PBD - for local
3009                          * switching
3010                          */
3011                         bnx2x_set_fw_mac_addr(&pbd_e2->src_mac_addr_hi,
3012                                               &pbd_e2->src_mac_addr_mid,
3013                                               &pbd_e2->src_mac_addr_lo,
3014                                               eth->h_source);
3015                         bnx2x_set_fw_mac_addr(&pbd_e2->dst_mac_addr_hi,
3016                                               &pbd_e2->dst_mac_addr_mid,
3017                                               &pbd_e2->dst_mac_addr_lo,
3018                                               eth->h_dest);
3019                 }
3020         } else {
3021                 pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
3022                 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
3023                 /* Set PBD in checksum offload case */
3024                 if (xmit_type & XMIT_CSUM)
3025                         hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
3026
3027         }
3028
3029         /* Setup the data pointer of the first BD of the packet */
3030         tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3031         tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3032         nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
3033         tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
3034         pkt_size = tx_start_bd->nbytes;
3035
3036         DP(NETIF_MSG_TX_QUEUED,
3037            "first bd @%p  addr (%x:%x)  nbd %d  nbytes %d  flags %x  vlan %x\n",
3038            tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
3039            le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
3040            tx_start_bd->bd_flags.as_bitfield,
3041            le16_to_cpu(tx_start_bd->vlan_or_ethertype));
3042
3043         if (xmit_type & XMIT_GSO) {
3044
3045                 DP(NETIF_MSG_TX_QUEUED,
3046                    "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
3047                    skb->len, hlen, skb_headlen(skb),
3048                    skb_shinfo(skb)->gso_size);
3049
3050                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
3051
3052                 if (unlikely(skb_headlen(skb) > hlen))
3053                         bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
3054                                                  &tx_start_bd, hlen,
3055                                                  bd_prod, ++nbd);
3056                 if (!CHIP_IS_E1x(bp))
3057                         bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
3058                                              xmit_type);
3059                 else
3060                         bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
3061         }
3062
3063         /* Set the PBD's parsing_data field if not zero
3064          * (for the chips newer than 57711).
3065          */
3066         if (pbd_e2_parsing_data)
3067                 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
3068
3069         tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
3070
3071         /* Handle fragmented skb */
3072         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3073                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3074
3075                 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
3076                                            skb_frag_size(frag), DMA_TO_DEVICE);
3077                 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
3078                         unsigned int pkts_compl = 0, bytes_compl = 0;
3079
3080                         DP(NETIF_MSG_TX_QUEUED,
3081                            "Unable to map page - dropping packet...\n");
3082
3083                         /* we need to unmap all buffers already mapped
3084                          * for this SKB;
3085                          * first_bd->nbd needs to be properly updated
3086                          * before the call to bnx2x_free_tx_pkt
3087                          */
3088                         first_bd->nbd = cpu_to_le16(nbd);
3089                         bnx2x_free_tx_pkt(bp, txdata,
3090                                           TX_BD(txdata->tx_pkt_prod),
3091                                           &pkts_compl, &bytes_compl);
3092                         return NETDEV_TX_OK;
3093                 }
3094
3095                 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3096                 tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
3097                 if (total_pkt_bd == NULL)
3098                         total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
3099
3100                 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3101                 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3102                 tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
3103                 le16_add_cpu(&pkt_size, skb_frag_size(frag));
3104                 nbd++;
3105
3106                 DP(NETIF_MSG_TX_QUEUED,
3107                    "frag %d  bd @%p  addr (%x:%x)  nbytes %d\n",
3108                    i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
3109                    le16_to_cpu(tx_data_bd->nbytes));
3110         }
3111
3112         DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
3113
3114         /* update with actual num BDs */
3115         first_bd->nbd = cpu_to_le16(nbd);
3116
3117         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3118
3119         /* now send a tx doorbell, counting the next BD
3120          * if the packet contains or ends with it
3121          */
3122         if (TX_BD_POFF(bd_prod) < nbd)
3123                 nbd++;
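
        /* Illustrative note (assumption, not part of the original comment):
         * if the offset of bd_prod within its BD page is smaller than the
         * number of BDs just queued, the chain crossed a page boundary and
         * the intervening next-page BD is accounted for in nbd as well.
         */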
3124
3125         /* total_pkt_bytes should be set on the first data BD if
3126          * it's not an LSO packet and there is more than one
3127          * data BD. In this case pkt_size is limited by an MTU value.
3128          * However we prefer to set it for an LSO packet (while we don't
3129          * have to) in order to save some CPU cycles in the non-LSO
3130          * case, which we care about much more.
3131          */
3132         if (total_pkt_bd != NULL)
3133                 total_pkt_bd->total_pkt_bytes = pkt_size;
3134
3135         if (pbd_e1x)
3136                 DP(NETIF_MSG_TX_QUEUED,
3137                    "PBD (E1X) @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
3138                    pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
3139                    pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
3140                    pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
3141                     le16_to_cpu(pbd_e1x->total_hlen_w));
3142         if (pbd_e2)
3143                 DP(NETIF_MSG_TX_QUEUED,
3144                    "PBD (E2) @%p  dst %x %x %x src %x %x %x parsing_data %x\n",
3145                    pbd_e2, pbd_e2->dst_mac_addr_hi, pbd_e2->dst_mac_addr_mid,
3146                    pbd_e2->dst_mac_addr_lo, pbd_e2->src_mac_addr_hi,
3147                    pbd_e2->src_mac_addr_mid, pbd_e2->src_mac_addr_lo,
3148                    pbd_e2->parsing_data);
3149         DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);
3150
3151         netdev_tx_sent_queue(txq, skb->len);
3152
3153         skb_tx_timestamp(skb);
3154
3155         txdata->tx_pkt_prod++;
3156         /*
3157          * Make sure that the BD data is updated before updating the producer
3158          * since FW might read the BD right after the producer is updated.
3159          * This is only applicable for weak-ordered memory model archs such
3160          * as IA-64. The following barrier is also mandatory since FW
3161          * assumes packets must have BDs.
3162          */
3163         wmb();
3164
3165         txdata->tx_db.data.prod += nbd;
3166         barrier();
3167
3168         DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
3169
3170         mmiowb();
3171
3172         txdata->tx_bd_prod += nbd;
3173
3174         if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_SKB_FRAGS + 3)) {
3175                 netif_tx_stop_queue(txq);
3176
3177                 /* paired memory barrier is in bnx2x_tx_int(), we have to keep
3178                  * ordering of set_bit() in netif_tx_stop_queue() and read of
3179                  * fp->bd_tx_cons */
3180                 smp_mb();
3181
3182                 fp->eth_q_stats.driver_xoff++;
3183                 if (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 3)
3184                         netif_tx_wake_queue(txq);
3185         }
3186         txdata->tx_pkt++;
3187
3188         return NETDEV_TX_OK;
3189 }
3190
3191 /**
3192  * bnx2x_setup_tc - routine to configure net_device for multi tc
3193  *
3194  * @dev: net device to configure
3195  * @num_tc: number of traffic classes to enable
3196  *
3197  * callback connected to the ndo_setup_tc function pointer
3198  */
3199 int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
3200 {
3201         int cos, prio, count, offset;
3202         struct bnx2x *bp = netdev_priv(dev);
3203
3204         /* setup tc must be called under rtnl lock */
3205         ASSERT_RTNL();
3206
3207         /* no traffic classes requested. aborting */
3208         if (!num_tc) {
3209                 netdev_reset_tc(dev);
3210                 return 0;
3211         }
3212
3213         /* requested to support too many traffic classes */
3214         if (num_tc > bp->max_cos) {
3215                 BNX2X_ERR("support for too many traffic classes requested: %d. max supported is %d\n",
3216                           num_tc, bp->max_cos);
3217                 return -EINVAL;
3218         }
3219
3220         /* declare amount of supported traffic classes */
3221         if (netdev_set_num_tc(dev, num_tc)) {
3222                 BNX2X_ERR("failed to declare %d traffic classes\n", num_tc);
3223                 return -EINVAL;
3224         }
3225
3226         /* configure priority to traffic class mapping */
3227         for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
3228                 netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]);
3229                 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
3230                    "mapping priority %d to tc %d\n",
3231                    prio, bp->prio_to_cos[prio]);
3232         }
3233
3234
3235         /* Use this configuration to differentiate tc0 from other COSes.
3236            This can be used for ETS or PFC, and save the effort of setting
3237            up a multi-class queue disc or negotiating DCBX with a switch
3238         netdev_set_prio_tc_map(dev, 0, 0);
3239         DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0);
3240         for (prio = 1; prio < 16; prio++) {
3241                 netdev_set_prio_tc_map(dev, prio, 1);
3242                 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1);
3243         } */
3244
3245         /* configure traffic class to transmission queue mapping */
3246         for (cos = 0; cos < bp->max_cos; cos++) {
3247                 count = BNX2X_NUM_ETH_QUEUES(bp);
3248                 offset = cos * MAX_TXQS_PER_COS;
3249                 netdev_set_tc_queue(dev, cos, count, offset);
3250                 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
3251                    "mapping tc %d to offset %d count %d\n",
3252                    cos, offset, count);
3253         }
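
        /* Worked example (illustrative only): with 4 ETH queues and
         * max_cos == 2, tc 0 covers txq offset 0 with count 4 and tc 1
         * covers txq offset MAX_TXQS_PER_COS with count 4.
         */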
3254
3255         return 0;
3256 }
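
/* Usage sketch (hypothetical, for illustration only): with bp->max_cos >= 3,
 * a caller holding the rtnl lock could enable three traffic classes with
 *
 *      rtnl_lock();
 *      rc = bnx2x_setup_tc(dev, 3);
 *      rtnl_unlock();
 *
 * while bnx2x_setup_tc(dev, 0) tears the mapping down via netdev_reset_tc().
 */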
3257
3258 /* called with rtnl_lock */
3259 int bnx2x_change_mac_addr(struct net_device *dev, void *p)
3260 {
3261         struct sockaddr *addr = p;
3262         struct bnx2x *bp = netdev_priv(dev);
3263         int rc = 0;
3264
3265         if (!bnx2x_is_valid_ether_addr(bp, addr->sa_data)) {
3266                 BNX2X_ERR("Requested MAC address is not valid\n");
3267                 return -EINVAL;
3268         }
3269
3270 #ifdef BCM_CNIC
3271         if ((IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp)) &&
3272             !is_zero_ether_addr(addr->sa_data)) {
3273                 BNX2X_ERR("Can't configure non-zero address on iSCSI or FCoE functions in MF-SD mode\n");
3274                 return -EINVAL;
3275         }
3276 #endif
3277
3278         if (netif_running(dev))  {
3279                 rc = bnx2x_set_eth_mac(bp, false);
3280                 if (rc)
3281                         return rc;
3282         }
3283
3284         dev->addr_assign_type &= ~NET_ADDR_RANDOM;
3285         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
3286
3287         if (netif_running(dev))
3288                 rc = bnx2x_set_eth_mac(bp, true);
3289
3290         return rc;
3291 }
3292
3293 static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
3294 {
3295         union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
3296         struct bnx2x_fastpath *fp = &bp->fp[fp_index];
3297         u8 cos;
3298
3299         /* Common */
3300 #ifdef BCM_CNIC
3301         if (IS_FCOE_IDX(fp_index)) {
3302                 memset(sb, 0, sizeof(union host_hc_status_block));
3303                 fp->status_blk_mapping = 0;
3304
3305         } else {
3306 #endif
3307                 /* status blocks */
3308                 if (!CHIP_IS_E1x(bp))
3309                         BNX2X_PCI_FREE(sb->e2_sb,
3310                                        bnx2x_fp(bp, fp_index,
3311                                                 status_blk_mapping),
3312                                        sizeof(struct host_hc_status_block_e2));
3313                 else
3314                         BNX2X_PCI_FREE(sb->e1x_sb,
3315                                        bnx2x_fp(bp, fp_index,
3316                                                 status_blk_mapping),
3317                                        sizeof(struct host_hc_status_block_e1x));
3318 #ifdef BCM_CNIC
3319         }
3320 #endif
3321         /* Rx */
3322         if (!skip_rx_queue(bp, fp_index)) {
3323                 bnx2x_free_rx_bds(fp);
3324
3325                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
3326                 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
3327                 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
3328                                bnx2x_fp(bp, fp_index, rx_desc_mapping),
3329                                sizeof(struct eth_rx_bd) * NUM_RX_BD);
3330
3331                 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
3332                                bnx2x_fp(bp, fp_index, rx_comp_mapping),
3333                                sizeof(struct eth_fast_path_rx_cqe) *
3334                                NUM_RCQ_BD);
3335
3336                 /* SGE ring */
3337                 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
3338                 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
3339                                bnx2x_fp(bp, fp_index, rx_sge_mapping),
3340                                BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
3341         }
3342
3343         /* Tx */
3344         if (!skip_tx_queue(bp, fp_index)) {
3345                 /* fastpath tx rings: tx_buf tx_desc */
3346                 for_each_cos_in_tx_queue(fp, cos) {
3347                         struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
3348
3349                         DP(NETIF_MSG_IFDOWN,
3350                            "freeing tx memory of fp %d cos %d cid %d\n",
3351                            fp_index, cos, txdata->cid);
3352
3353                         BNX2X_FREE(txdata->tx_buf_ring);
3354                         BNX2X_PCI_FREE(txdata->tx_desc_ring,
3355                                 txdata->tx_desc_mapping,
3356                                 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
3357                 }
3358         }
3359         /* end of fastpath */
3360 }
3361
3362 void bnx2x_free_fp_mem(struct bnx2x *bp)
3363 {
3364         int i;
3365         for_each_queue(bp, i)
3366                 bnx2x_free_fp_mem_at(bp, i);
3367 }
3368
3369 static void set_sb_shortcuts(struct bnx2x *bp, int index)
3370 {
3371         union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
3372         if (!CHIP_IS_E1x(bp)) {
3373                 bnx2x_fp(bp, index, sb_index_values) =
3374                         (__le16 *)status_blk.e2_sb->sb.index_values;
3375                 bnx2x_fp(bp, index, sb_running_index) =
3376                         (__le16 *)status_blk.e2_sb->sb.running_index;
3377         } else {
3378                 bnx2x_fp(bp, index, sb_index_values) =
3379                         (__le16 *)status_blk.e1x_sb->sb.index_values;
3380                 bnx2x_fp(bp, index, sb_running_index) =
3381                         (__le16 *)status_blk.e1x_sb->sb.running_index;
3382         }
3383 }
3384
3385 /* Returns the number of actually allocated BDs */
3386 static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
3387                               int rx_ring_size)
3388 {
3389         struct bnx2x *bp = fp->bp;
3390         u16 ring_prod, cqe_ring_prod;
3391         int i, failure_cnt = 0;
3392
3393         fp->rx_comp_cons = 0;
3394         cqe_ring_prod = ring_prod = 0;
3395
3396         /* This routine is called only during init, so
3397          * fp->eth_q_stats.rx_skb_alloc_failed = 0
3398          */
3399         for (i = 0; i < rx_ring_size; i++) {
3400                 if (bnx2x_alloc_rx_data(bp, fp, ring_prod) < 0) {
3401                         failure_cnt++;
3402                         continue;
3403                 }
3404                 ring_prod = NEXT_RX_IDX(ring_prod);
3405                 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
3406                 WARN_ON(ring_prod <= (i - failure_cnt));
3407         }
3408
3409         if (failure_cnt)
3410                 BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
3411                           i - failure_cnt, fp->index);
3412
3413         fp->rx_bd_prod = ring_prod;
3414         /* Limit the CQE producer by the CQE ring size */
3415         fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
3416                                cqe_ring_prod);
3417         fp->rx_pkt = fp->rx_calls = 0;
3418
3419         fp->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
3420
3421         return i - failure_cnt;
3422 }
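
/* Worked example (illustrative only): if rx_ring_size is 4096 and three
 * bnx2x_alloc_rx_data() calls fail, the producers advance 4093 times,
 * rx_skb_alloc_failed grows by 3 and the function returns 4093.
 */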
3423
3424 static void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
3425 {
3426         int i;
3427
3428         for (i = 1; i <= NUM_RCQ_RINGS; i++) {
3429                 struct eth_rx_cqe_next_page *nextpg;
3430
3431                 nextpg = (struct eth_rx_cqe_next_page *)
3432                         &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
3433                 nextpg->addr_hi =
3434                         cpu_to_le32(U64_HI(fp->rx_comp_mapping +
3435                                    BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
3436                 nextpg->addr_lo =
3437                         cpu_to_le32(U64_LO(fp->rx_comp_mapping +
3438                                    BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
3439         }
3440 }
3441
3442 static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
3443 {
3444         union host_hc_status_block *sb;
3445         struct bnx2x_fastpath *fp = &bp->fp[index];
3446         int ring_size = 0;
3447         u8 cos;
3448         int rx_ring_size = 0;
3449
3450 #ifdef BCM_CNIC
3451         if (!bp->rx_ring_size &&
3452             (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) {
3453                 rx_ring_size = MIN_RX_SIZE_NONTPA;
3454                 bp->rx_ring_size = rx_ring_size;
3455         } else
3456 #endif
3457         if (!bp->rx_ring_size) {
3458                 u32 cfg = SHMEM_RD(bp,
3459                              dev_info.port_hw_config[BP_PORT(bp)].default_cfg);
3460
3461                 rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
3462
3463                 /* Decrease ring size for 1G functions */
3464                 if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
3465                     PORT_HW_CFG_NET_SERDES_IF_SGMII)
3466                         rx_ring_size /= 10;
3467
3468                 /* allocate at least number of buffers required by FW */
3469                 rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
3470                                      MIN_RX_SIZE_TPA, rx_ring_size);
3471
3472                 bp->rx_ring_size = rx_ring_size;
3473         } else /* if rx_ring_size specified - use it */
3474                 rx_ring_size = bp->rx_ring_size;
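
        /* Sizing example (illustrative only): with 4 RX queues the starting
         * point is MAX_RX_AVAIL / 4; an SGMII (1G) serdes interface divides
         * that by 10, and the result is clamped so it never drops below the
         * FW minimum (MIN_RX_SIZE_TPA, or MIN_RX_SIZE_NONTPA with TPA off).
         */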
3475
3476         /* Common */
3477         sb = &bnx2x_fp(bp, index, status_blk);
3478 #ifdef BCM_CNIC
3479         if (!IS_FCOE_IDX(index)) {
3480 #endif
3481                 /* status blocks */
3482                 if (!CHIP_IS_E1x(bp))
3483                         BNX2X_PCI_ALLOC(sb->e2_sb,
3484                                 &bnx2x_fp(bp, index, status_blk_mapping),
3485                                 sizeof(struct host_hc_status_block_e2));
3486                 else
3487                         BNX2X_PCI_ALLOC(sb->e1x_sb,
3488                                 &bnx2x_fp(bp, index, status_blk_mapping),
3489                                 sizeof(struct host_hc_status_block_e1x));
3490 #ifdef BCM_CNIC
3491         }
3492 #endif
3493
3494         /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
3495          * set shortcuts for it.
3496          */
3497         if (!IS_FCOE_IDX(index))
3498                 set_sb_shortcuts(bp, index);
3499
3500         /* Tx */
3501         if (!skip_tx_queue(bp, index)) {
3502                 /* fastpath tx rings: tx_buf tx_desc */
3503                 for_each_cos_in_tx_queue(fp, cos) {
3504                         struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
3505
3506                         DP(NETIF_MSG_IFUP,
3507                            "allocating tx memory of fp %d cos %d\n",
3508                            index, cos);
3509
3510                         BNX2X_ALLOC(txdata->tx_buf_ring,
3511                                 sizeof(struct sw_tx_bd) * NUM_TX_BD);
3512                         BNX2X_PCI_ALLOC(txdata->tx_desc_ring,
3513                                 &txdata->tx_desc_mapping,
3514                                 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
3515                 }
3516         }
3517
3518         /* Rx */
3519         if (!skip_rx_queue(bp, index)) {
3520                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
3521                 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_buf_ring),
3522                                 sizeof(struct sw_rx_bd) * NUM_RX_BD);
3523                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_desc_ring),
3524                                 &bnx2x_fp(bp, index, rx_desc_mapping),
3525                                 sizeof(struct eth_rx_bd) * NUM_RX_BD);
3526
3527                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_comp_ring),
3528                                 &bnx2x_fp(bp, index, rx_comp_mapping),
3529                                 sizeof(struct eth_fast_path_rx_cqe) *
3530                                 NUM_RCQ_BD);
3531
3532                 /* SGE ring */
3533                 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring),
3534                                 sizeof(struct sw_rx_page) * NUM_RX_SGE);
3535                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_sge_ring),
3536                                 &bnx2x_fp(bp, index, rx_sge_mapping),
3537                                 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
3538                 /* RX BD ring */
3539                 bnx2x_set_next_page_rx_bd(fp);
3540
3541                 /* CQ ring */
3542                 bnx2x_set_next_page_rx_cq(fp);
3543
3544                 /* BDs */
3545                 ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
3546                 if (ring_size < rx_ring_size)
3547                         goto alloc_mem_err;
3548         }
3549
3550         return 0;
3551
3552 /* handles low memory cases */
3553 alloc_mem_err:
3554         BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
3555                                                 index, ring_size);
3556         /* FW will drop all packets if the queue is not big enough.
3557          * In these cases we disable the queue.
3558          * Min size is different for OOO, TPA and non-TPA queues.
3559          */
3560         if (ring_size < (fp->disable_tpa ?
3561                                 MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
3562                         /* release memory allocated for this queue */
3563                         bnx2x_free_fp_mem_at(bp, index);
3564                         return -ENOMEM;
3565         }
3566         return 0;
3567 }
3568
3569 int bnx2x_alloc_fp_mem(struct bnx2x *bp)
3570 {
3571         int i;
3572
3573         /*
3574          * 1. Allocate FP for leading - fatal if error
3575          * 2. {CNIC} Allocate FCoE FP - fatal if error
3576          * 3. {CNIC} Allocate OOO + FWD - disable OOO if error
3577          * 4. Allocate RSS - fix number of queues if error
3578          */
3579
3580         /* leading */
3581         if (bnx2x_alloc_fp_mem_at(bp, 0))
3582                 return -ENOMEM;
3583
3584 #ifdef BCM_CNIC
3585         if (!NO_FCOE(bp))
3586                 /* FCoE */
3587                 if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX))
3588                         /* we will fail load process instead of mark
3589                          * NO_FCOE_FLAG
3590                          */
3591                         return -ENOMEM;
3592 #endif
3593
3594         /* RSS */
3595         for_each_nondefault_eth_queue(bp, i)
3596                 if (bnx2x_alloc_fp_mem_at(bp, i))
3597                         break;
3598
3599         /* handle memory failures */
3600         if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
3601                 int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
3602
3603                 WARN_ON(delta < 0);
3604 #ifdef BCM_CNIC
3605                 /*
3606                  * move non eth FPs next to last eth FP
3607                  * must be done in that order
3608                  * FCOE_IDX < FWD_IDX < OOO_IDX
3609                  */
3610
3611                 /* move FCoE fp even if NO_FCOE_FLAG is on */
3612                 bnx2x_move_fp(bp, FCOE_IDX, FCOE_IDX - delta);
3613 #endif
3614                 bp->num_queues -= delta;
3615                 BNX2X_ERR("Adjusted num of queues from %d to %d\n",
3616                           bp->num_queues + delta, bp->num_queues);
3617         }
3618
3619         return 0;
3620 }
3621
3622 void bnx2x_free_mem_bp(struct bnx2x *bp)
3623 {
3624         kfree(bp->fp);
3625         kfree(bp->msix_table);
3626         kfree(bp->ilt);
3627 }
3628
3629 int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
3630 {
3631         struct bnx2x_fastpath *fp;
3632         struct msix_entry *tbl;
3633         struct bnx2x_ilt *ilt;
3634         int msix_table_size = 0;
3635
3636         /*
3637          * The biggest MSI-X table we might need is as a maximum number of fast
3638          * path IGU SBs plus default SB (for PF).
3639          */
3640         msix_table_size = bp->igu_sb_cnt + 1;
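        /* e.g. (illustrative only): a PF with igu_sb_cnt == 16 needs room
         * for 17 msix_entry slots, 16 fastpath vectors plus the default SB.
         */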
3641
3642         /* fp array: RSS plus CNIC related L2 queues */
3643         fp = kcalloc(BNX2X_MAX_RSS_COUNT(bp) + NON_ETH_CONTEXT_USE,
3644                      sizeof(*fp), GFP_KERNEL);
3645         if (!fp)
3646                 goto alloc_err;
3647         bp->fp = fp;
3648
3649         /* msix table */
3650         tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
3651         if (!tbl)
3652                 goto alloc_err;
3653         bp->msix_table = tbl;
3654
3655         /* ilt */
3656         ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
3657         if (!ilt)
3658                 goto alloc_err;
3659         bp->ilt = ilt;
3660
3661         return 0;
3662 alloc_err:
3663         bnx2x_free_mem_bp(bp);
3664         return -ENOMEM;
3665
3666 }
3667
3668 int bnx2x_reload_if_running(struct net_device *dev)
3669 {
3670         struct bnx2x *bp = netdev_priv(dev);
3671
3672         if (unlikely(!netif_running(dev)))
3673                 return 0;
3674
3675         bnx2x_nic_unload(bp, UNLOAD_NORMAL);
3676         return bnx2x_nic_load(bp, LOAD_NORMAL);
3677 }
3678
3679 int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
3680 {
3681         u32 sel_phy_idx = 0;
3682         if (bp->link_params.num_phys <= 1)
3683                 return INT_PHY;
3684
3685         if (bp->link_vars.link_up) {
3686                 sel_phy_idx = EXT_PHY1;
3687                 /* In case link is SERDES, check if the EXT_PHY2 is the one */
3688                 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
3689                     (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
3690                         sel_phy_idx = EXT_PHY2;
3691         } else {
3692
3693                 switch (bnx2x_phy_selection(&bp->link_params)) {
3694                 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
3695                 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
3696                 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
3697                        sel_phy_idx = EXT_PHY1;
3698                        break;
3699                 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
3700                 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
3701                        sel_phy_idx = EXT_PHY2;
3702                        break;
3703                 }
3704         }
3705
3706         return sel_phy_idx;
3707 }
3708
3709 int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
3710 {
3711         u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
3712         /*
3713          * The selected active PHY is always after swapping (in case PHY
3714          * swapping is enabled). So when swapping is enabled, we need to reverse
3715          * the configuration
3716          */
3717
3718         if (bp->link_params.multi_phy_config &
3719             PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
3720                 if (sel_phy_idx == EXT_PHY1)
3721                         sel_phy_idx = EXT_PHY2;
3722                 else if (sel_phy_idx == EXT_PHY2)
3723                         sel_phy_idx = EXT_PHY1;
3724         }
3725         return LINK_CONFIG_IDX(sel_phy_idx);
3726 }
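
/* Worked example (illustrative only): with PHY swapping enabled and
 * bnx2x_get_cur_phy_idx() reporting EXT_PHY1, the index is flipped and the
 * function returns LINK_CONFIG_IDX(EXT_PHY2).
 */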
3727
3728 #if defined(NETDEV_FCOE_WWNN) && defined(BCM_CNIC)
3729 int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
3730 {
3731         struct bnx2x *bp = netdev_priv(dev);
3732         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
3733
3734         switch (type) {
3735         case NETDEV_FCOE_WWNN:
3736                 *wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
3737                                 cp->fcoe_wwn_node_name_lo);
3738                 break;
3739         case NETDEV_FCOE_WWPN:
3740                 *wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
3741                                 cp->fcoe_wwn_port_name_lo);
3742                 break;
3743         default:
3744                 BNX2X_ERR("Wrong WWN type requested - %d\n", type);
3745                 return -EINVAL;
3746         }
3747
3748         return 0;
3749 }
3750 #endif
3751
3752 /* called with rtnl_lock */
3753 int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
3754 {
3755         struct bnx2x *bp = netdev_priv(dev);
3756
3757         if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
3758                 BNX2X_ERR("Can't perform change MTU during parity recovery\n");
3759                 return -EAGAIN;
3760         }
3761
3762         if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
3763             ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) {
3764                 BNX2X_ERR("Can't support requested MTU size\n");
3765                 return -EINVAL;
3766         }
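
        /* Illustrative reading of the check above (no new constraint added):
         * new_mtu may not exceed the jumbo-frame limit, and new_mtu plus the
         * Ethernet header must still reach the minimum frame size.
         */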
3767
3768         /* This does not race with packet allocation
3769          * because the actual alloc size is
3770          * only updated as part of load
3771          */
3772         dev->mtu = new_mtu;
3773
3774         return bnx2x_reload_if_running(dev);
3775 }
3776
3777 netdev_features_t bnx2x_fix_features(struct net_device *dev,
3778                                      netdev_features_t features)
3779 {
3780         struct bnx2x *bp = netdev_priv(dev);
3781
3782         /* TPA requires Rx CSUM offloading */
3783         if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa) {
3784                 features &= ~NETIF_F_LRO;
3785                 features &= ~NETIF_F_GRO;
3786         }
3787
3788         return features;
3789 }
3790
3791 int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
3792 {
3793         struct bnx2x *bp = netdev_priv(dev);
3794         u32 flags = bp->flags;
3795         bool bnx2x_reload = false;
3796
3797         if (features & NETIF_F_LRO)
3798                 flags |= TPA_ENABLE_FLAG;
3799         else
3800                 flags &= ~TPA_ENABLE_FLAG;
3801
3802         if (features & NETIF_F_GRO)
3803                 flags |= GRO_ENABLE_FLAG;
3804         else
3805                 flags &= ~GRO_ENABLE_FLAG;
3806
3807         if (features & NETIF_F_LOOPBACK) {
3808                 if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
3809                         bp->link_params.loopback_mode = LOOPBACK_BMAC;
3810                         bnx2x_reload = true;
3811                 }
3812         } else {
3813                 if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
3814                         bp->link_params.loopback_mode = LOOPBACK_NONE;
3815                         bnx2x_reload = true;
3816                 }
3817         }
3818
3819         if (flags ^ bp->flags) {
3820                 bp->flags = flags;
3821                 bnx2x_reload = true;
3822         }
3823
3824         if (bnx2x_reload) {
3825                 if (bp->recovery_state == BNX2X_RECOVERY_DONE)
3826                         return bnx2x_reload_if_running(dev);
3827                 /* else: bnx2x_nic_load() will be called at end of recovery */
3828         }
3829
3830         return 0;
3831 }
3832
3833 void bnx2x_tx_timeout(struct net_device *dev)
3834 {
3835         struct bnx2x *bp = netdev_priv(dev);
3836
3837 #ifdef BNX2X_STOP_ON_ERROR
3838         if (!bp->panic)
3839                 bnx2x_panic();
3840 #endif
3841
3842         smp_mb__before_clear_bit();
3843         set_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state);
3844         smp_mb__after_clear_bit();
3845
3846         /* This allows the netif to be shutdown gracefully before resetting */
3847         schedule_delayed_work(&bp->sp_rtnl_task, 0);
3848 }
3849
3850 int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
3851 {
3852         struct net_device *dev = pci_get_drvdata(pdev);
3853         struct bnx2x *bp;
3854
3855         if (!dev) {
3856                 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
3857                 return -ENODEV;
3858         }
3859         bp = netdev_priv(dev);
3860
3861         rtnl_lock();
3862
3863         pci_save_state(pdev);
3864
3865         if (!netif_running(dev)) {
3866                 rtnl_unlock();
3867                 return 0;
3868         }
3869
3870         netif_device_detach(dev);
3871
3872         bnx2x_nic_unload(bp, UNLOAD_CLOSE);
3873
3874         bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
3875
3876         rtnl_unlock();
3877
3878         return 0;
3879 }
3880
3881 int bnx2x_resume(struct pci_dev *pdev)
3882 {
3883         struct net_device *dev = pci_get_drvdata(pdev);
3884         struct bnx2x *bp;
3885         int rc;
3886
3887         if (!dev) {
3888                 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
3889                 return -ENODEV;
3890         }
3891         bp = netdev_priv(dev);
3892
3893         if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
3894                 BNX2X_ERR("Handling parity error recovery. Try again later\n");
3895                 return -EAGAIN;
3896         }
3897
3898         rtnl_lock();
3899
3900         pci_restore_state(pdev);
3901
3902         if (!netif_running(dev)) {
3903                 rtnl_unlock();
3904                 return 0;
3905         }
3906
3907         bnx2x_set_power_state(bp, PCI_D0);
3908         netif_device_attach(dev);
3909
3910         rc = bnx2x_nic_load(bp, LOAD_OPEN);
3911
3912         rtnl_unlock();
3913
3914         return rc;
3915 }
3916
3917
3918 void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
3919                               u32 cid)
3920 {
3921         /* ustorm cxt validation */
3922         cxt->ustorm_ag_context.cdu_usage =
3923                 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
3924                         CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
3925         /* xcontext validation */
3926         cxt->xstorm_ag_context.cdu_reserved =
3927                 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
3928                         CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
3929 }
3930
3931 static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
3932                                     u8 fw_sb_id, u8 sb_index,
3933                                     u8 ticks)
3934 {
3936         u32 addr = BAR_CSTRORM_INTMEM +
3937                    CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
3938         REG_WR8(bp, addr, ticks);
3939         DP(NETIF_MSG_IFUP,
3940            "port %x fw_sb_id %d sb_index %d ticks %d\n",
3941            port, fw_sb_id, sb_index, ticks);
3942 }
3943
3944 static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
3945                                     u16 fw_sb_id, u8 sb_index,
3946                                     u8 disable)
3947 {
3948         u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
3949         u32 addr = BAR_CSTRORM_INTMEM +
3950                    CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
3951         u16 flags = REG_RD16(bp, addr);
3952         /* clear and set */
3953         flags &= ~HC_INDEX_DATA_HC_ENABLED;
3954         flags |= enable_flag;
3955         REG_WR16(bp, addr, flags);
3956         DP(NETIF_MSG_IFUP,
3957            "port %x fw_sb_id %d sb_index %d disable %d\n",
3958            port, fw_sb_id, sb_index, disable);
3959 }
3960
3961 void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
3962                                     u8 sb_index, u8 disable, u16 usec)
3963 {
3964         int port = BP_PORT(bp);
3965         u8 ticks = usec / BNX2X_BTR;
3966
3967         storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
3968
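        /* Illustrative note (assumption): a zero usec value disables the
         * index regardless of the caller's disable argument, while a
         * non-zero usec keeps the caller's choice and uses usec / BNX2X_BTR
         * ticks for the timeout programmed above.
         */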
3969         disable = disable ? 1 : (usec ? 0 : 1);
3970         storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
3971 }