pandora-kernel.git: drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
1 /* bnx2x_cmn.c: Broadcom Everest network driver.
2  *
3  * Copyright (c) 2007-2011 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10  * Written by: Eliezer Tamir
11  * Based on code from Michael Chan's bnx2 driver
12  * UDP CSUM errata workaround by Arik Gendelman
13  * Slowpath and fastpath rework by Vladislav Zolotarov
14  * Statistics and Link management by Yitchak Gertner
15  *
16  */
17
18 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
20 #include <linux/etherdevice.h>
21 #include <linux/if_vlan.h>
22 #include <linux/interrupt.h>
23 #include <linux/ip.h>
24 #include <net/ipv6.h>
25 #include <net/ip6_checksum.h>
26 #include <linux/firmware.h>
27 #include <linux/prefetch.h>
28 #include "bnx2x_cmn.h"
29 #include "bnx2x_init.h"
30 #include "bnx2x_sp.h"
31
32
33
34 /**
35  * bnx2x_bz_fp - zero content of the fastpath structure.
36  *
37  * @bp:         driver handle
38  * @index:      fastpath index to be zeroed
39  *
40  * Makes sure the contents of bp->fp[index].napi are kept
41  * intact.
42  */
43 static inline void bnx2x_bz_fp(struct bnx2x *bp, int index)
44 {
45         struct bnx2x_fastpath *fp = &bp->fp[index];
46         struct napi_struct orig_napi = fp->napi;
47         /* bzero bnx2x_fastpath contents */
48         memset(fp, 0, sizeof(*fp));
49
50         /* Restore the NAPI object as it has been already initialized */
51         fp->napi = orig_napi;
52
53         fp->bp = bp;
54         fp->index = index;
55         if (IS_ETH_FP(fp))
56                 fp->max_cos = bp->max_cos;
57         else
58                 /* Special queues support only one CoS */
59                 fp->max_cos = 1;
60
61         /*
62          * set the tpa flag for each queue. The tpa flag determines the queue
63          * minimal size so it must be set prior to queue memory allocation
64          */
65         fp->disable_tpa = ((bp->flags & TPA_ENABLE_FLAG) == 0);
66
67 #ifdef BCM_CNIC
68         /* We don't want TPA on an FCoE L2 ring */
69         if (IS_FCOE_FP(fp))
70                 fp->disable_tpa = 1;
71 #endif
72 }
73
74 /**
75  * bnx2x_move_fp - move content of the fastpath structure.
76  *
77  * @bp:         driver handle
78  * @from:       source FP index
79  * @to:         destination FP index
80  *
81  * Makes sure the contents of bp->fp[to].napi are kept
82  * intact.
83  */
84 static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
85 {
86         struct bnx2x_fastpath *from_fp = &bp->fp[from];
87         struct bnx2x_fastpath *to_fp = &bp->fp[to];
88         struct napi_struct orig_napi = to_fp->napi;
89         /* Move bnx2x_fastpath contents */
90         memcpy(to_fp, from_fp, sizeof(*to_fp));
91         to_fp->index = to;
92
93         /* Restore the NAPI object as it has been already initialized */
94         to_fp->napi = orig_napi;
95 }
96
97 int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
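/* Hedged note on the indexing above (an assumption, not spelled out here):
 * load_count[path][0] is taken to count every function loaded on that path,
 * while load_count[path][1] and load_count[path][2] count the functions
 * loaded on port 0 and port 1 of that path respectively.
 */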
98
99 /* free skb in the packet ring at pos idx
100  * return idx of last bd freed
101  */
102 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
103                              u16 idx)
104 {
105         struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
106         struct eth_tx_start_bd *tx_start_bd;
107         struct eth_tx_bd *tx_data_bd;
108         struct sk_buff *skb = tx_buf->skb;
109         u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
110         int nbd;
111
112         /* prefetch skb end pointer to speedup dev_kfree_skb() */
113         prefetch(&skb->end);
114
115         DP(BNX2X_MSG_FP, "fp[%d]: pkt_idx %d  buff @(%p)->skb %p\n",
116            txdata->txq_index, idx, tx_buf, skb);
117
118         /* unmap first bd */
119         DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
120         tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
121         dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
122                          BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);
123
124
125         nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
126 #ifdef BNX2X_STOP_ON_ERROR
127         if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
128                 BNX2X_ERR("BAD nbd!\n");
129                 bnx2x_panic();
130         }
131 #endif
132         new_cons = nbd + tx_buf->first_bd;
133
134         /* Get the next bd */
135         bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
136
137         /* Skip a parse bd... */
138         --nbd;
139         bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
140
141         /* ...and the TSO split header bd since they have no mapping */
142         if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
143                 --nbd;
144                 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
145         }
146
147         /* now free frags */
148         while (nbd > 0) {
149
150                 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
151                 tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
152                 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
153                                BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
154                 if (--nbd)
155                         bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
156         }
157
158         /* release skb */
159         WARN_ON(!skb);
160         dev_kfree_skb_any(skb);
161         tx_buf->first_bd = 0;
162         tx_buf->skb = NULL;
163
164         return new_cons;
165 }
166
167 int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
168 {
169         struct netdev_queue *txq;
170         u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
171
172 #ifdef BNX2X_STOP_ON_ERROR
173         if (unlikely(bp->panic))
174                 return -1;
175 #endif
176
177         txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
178         hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
179         sw_cons = txdata->tx_pkt_cons;
180
181         while (sw_cons != hw_cons) {
182                 u16 pkt_cons;
183
184                 pkt_cons = TX_BD(sw_cons);
185
186                 DP(NETIF_MSG_TX_DONE, "queue[%d]: hw_cons %u  sw_cons %u "
187                                       " pkt_cons %u\n",
188                    txdata->txq_index, hw_cons, sw_cons, pkt_cons);
189
190                 bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons);
191                 sw_cons++;
192         }
193
194         txdata->tx_pkt_cons = sw_cons;
195         txdata->tx_bd_cons = bd_cons;
196
197         /* Need to make the tx_bd_cons update visible to start_xmit()
198          * before checking for netif_tx_queue_stopped().  Without the
199          * memory barrier, there is a small possibility that
200          * start_xmit() will miss it and cause the queue to be stopped
201          * forever.
202          * On the other hand we need an rmb() here to ensure the proper
203          * ordering of bit testing in the following
204          * netif_tx_queue_stopped(txq) call.
205          */
206         smp_mb();
207
208         if (unlikely(netif_tx_queue_stopped(txq))) {
209                 /* Taking tx_lock() is needed to prevent reenabling the queue
210                  * while it's empty. This could have happened if rx_action() gets
211                  * suspended in bnx2x_tx_int() after the condition before
212                  * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
213                  *
214                  * stops the queue->sees fresh tx_bd_cons->releases the queue->
215                  * sends some packets consuming the whole queue again->
216                  * stops the queue
217                  */
218
219                 __netif_tx_lock(txq, smp_processor_id());
220
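                /* Hedged reading of the threshold below: MAX_SKB_FRAGS + 3
                 * appears to reserve room for one more worst-case packet
                 * (a start BD, a parsing BD, a possible extra BD plus one
                 * BD per fragment), so the queue is only woken when at
                 * least one such packet can still be queued.
                 */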
221                 if ((netif_tx_queue_stopped(txq)) &&
222                     (bp->state == BNX2X_STATE_OPEN) &&
223                     (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 3))
224                         netif_tx_wake_queue(txq);
225
226                 __netif_tx_unlock(txq);
227         }
228         return 0;
229 }
230
231 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
232                                              u16 idx)
233 {
234         u16 last_max = fp->last_max_sge;
235
236         if (SUB_S16(idx, last_max) > 0)
237                 fp->last_max_sge = idx;
238 }
239
240 static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
241                                   struct eth_fast_path_rx_cqe *fp_cqe)
242 {
243         struct bnx2x *bp = fp->bp;
244         u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
245                                      le16_to_cpu(fp_cqe->len_on_bd)) >>
246                       SGE_PAGE_SHIFT;
247         u16 last_max, last_elem, first_elem;
248         u16 delta = 0;
249         u16 i;
250
251         if (!sge_len)
252                 return;
253
254         /* First mark all used pages */
255         for (i = 0; i < sge_len; i++)
256                 BIT_VEC64_CLEAR_BIT(fp->sge_mask,
257                         RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[i])));
258
259         DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
260            sge_len - 1, le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));
261
262         /* Here we assume that the last SGE index is the biggest */
263         prefetch((void *)(fp->sge_mask));
264         bnx2x_update_last_max_sge(fp,
265                 le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));
266
267         last_max = RX_SGE(fp->last_max_sge);
268         last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
269         first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
270
271         /* If ring is not full */
272         if (last_elem + 1 != first_elem)
273                 last_elem++;
274
275         /* Now update the prod */
276         for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
277                 if (likely(fp->sge_mask[i]))
278                         break;
279
280                 fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
281                 delta += BIT_VEC64_ELEM_SZ;
282         }
283
284         if (delta > 0) {
285                 fp->rx_sge_prod += delta;
286                 /* clear page-end entries */
287                 bnx2x_clear_sge_mask_next_elems(fp);
288         }
289
290         DP(NETIF_MSG_RX_STATUS,
291            "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
292            fp->last_max_sge, fp->rx_sge_prod);
293 }
294
295 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
296                             struct sk_buff *skb, u16 cons, u16 prod,
297                             struct eth_fast_path_rx_cqe *cqe)
298 {
299         struct bnx2x *bp = fp->bp;
300         struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
301         struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
302         struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
303         dma_addr_t mapping;
304         struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
305         struct sw_rx_bd *first_buf = &tpa_info->first_buf;
306
307         /* print error if current state != stop */
308         if (tpa_info->tpa_state != BNX2X_TPA_STOP)
309                 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
310
311         /* Try to map an empty skb from the aggregation info  */
312         mapping = dma_map_single(&bp->pdev->dev,
313                                  first_buf->skb->data,
314                                  fp->rx_buf_size, DMA_FROM_DEVICE);
315         /*
316          *  ...if it fails - move the skb from the consumer to the producer
317          *  and set the current aggregation state as ERROR to drop it
318          *  when TPA_STOP arrives.
319          */
320
321         if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
322                 /* Move the BD from the consumer to the producer */
323                 bnx2x_reuse_rx_skb(fp, cons, prod);
324                 tpa_info->tpa_state = BNX2X_TPA_ERROR;
325                 return;
326         }
327
328         /* move empty skb from pool to prod */
329         prod_rx_buf->skb = first_buf->skb;
330         dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
331         /* point prod_bd to new skb */
332         prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
333         prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
334
335         /* move partial skb from cons to pool (don't unmap yet) */
336         *first_buf = *cons_rx_buf;
337
338         /* mark bin state as START */
339         tpa_info->parsing_flags =
340                 le16_to_cpu(cqe->pars_flags.flags);
341         tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
342         tpa_info->tpa_state = BNX2X_TPA_START;
343         tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
344         tpa_info->placement_offset = cqe->placement_offset;
345
346 #ifdef BNX2X_STOP_ON_ERROR
347         fp->tpa_queue_used |= (1 << queue);
348 #ifdef _ASM_GENERIC_INT_L64_H
349         DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
350 #else
351         DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
352 #endif
353            fp->tpa_queue_used);
354 #endif
355 }
356
357 /* Timestamp option length allowed for TPA aggregation:
358  *
359  *              nop nop kind length echo val
360  */
361 #define TPA_TSTAMP_OPT_LEN      12
362 /**
363  * bnx2x_set_lro_mss - calculate the approximate value of the MSS
364  *
365  * @bp:                 driver handle
366  * @parsing_flags:      parsing flags from the START CQE
367  * @len_on_bd:          total length of the first packet for the
368  *                      aggregation.
369  *
370  * Approximate value of the MSS for this aggregation calculated using
371  * the first packet of it.
372  */
373 static inline u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags,
374                                     u16 len_on_bd)
375 {
376         /*
377          * A TPA aggregation won't have IP options, TCP options other
378          * than the timestamp, or IPv6 extension headers.
379          */
380         u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);
381
382         if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
383             PRS_FLAG_OVERETH_IPV6)
384                 hdrs_len += sizeof(struct ipv6hdr);
385         else /* IPv4 */
386                 hdrs_len += sizeof(struct iphdr);
387
388
389         /* Check if there was a TCP timestamp; if there is one it will
390          * always be 12 bytes long: nop nop kind length echo val.
391          *
392          * Otherwise FW would close the aggregation.
393          */
394         if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
395                 hdrs_len += TPA_TSTAMP_OPT_LEN;
396
397         return len_on_bd - hdrs_len;
398 }
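
/* Worked example (illustrative figures only): for an IPv4 aggregation whose
 * first frame carries the TCP timestamp option,
 *      hdrs_len = ETH_HLEN (14) + sizeof(struct iphdr) (20) +
 *                 sizeof(struct tcphdr) (20) + TPA_TSTAMP_OPT_LEN (12) = 66,
 * so a len_on_bd of 1514 bytes gives an approximate MSS of
 * 1514 - 66 = 1448 bytes.
 */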
399
400 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
401                                u16 queue, struct sk_buff *skb,
402                                struct eth_end_agg_rx_cqe *cqe,
403                                u16 cqe_idx)
404 {
405         struct sw_rx_page *rx_pg, old_rx_pg;
406         u32 i, frag_len, frag_size, pages;
407         int err;
408         int j;
409         struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
410         u16 len_on_bd = tpa_info->len_on_bd;
411
412         frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;
413         pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
414
415         /* This is needed in order to enable forwarding support */
416         if (frag_size)
417                 skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp,
418                                         tpa_info->parsing_flags, len_on_bd);
419
420 #ifdef BNX2X_STOP_ON_ERROR
421         if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
422                 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
423                           pages, cqe_idx);
424                 BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
425                 bnx2x_panic();
426                 return -EINVAL;
427         }
428 #endif
429
430         /* Run through the SGL and compose the fragmented skb */
431         for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
432                 u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));
433
434                 /* FW gives the indices of the SGE as if the ring is an array
435                    (meaning that "next" element will consume 2 indices) */
436                 frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
437                 rx_pg = &fp->rx_page_ring[sge_idx];
438                 old_rx_pg = *rx_pg;
439
440                 /* If we fail to allocate a substitute page, we simply stop
441                    where we are and drop the whole packet */
442                 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
443                 if (unlikely(err)) {
444                         fp->eth_q_stats.rx_skb_alloc_failed++;
445                         return err;
446                 }
447
448                 /* Unmap the page as we are going to pass it to the stack */
449                 dma_unmap_page(&bp->pdev->dev,
450                                dma_unmap_addr(&old_rx_pg, mapping),
451                                SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
452
453                 /* Add one frag and update the appropriate fields in the skb */
454                 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
455
456                 skb->data_len += frag_len;
457                 skb->truesize += SGE_PAGE_SIZE * PAGES_PER_SGE;
458                 skb->len += frag_len;
459
460                 frag_size -= frag_len;
461         }
462
463         return 0;
464 }
465
466 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
467                            u16 queue, struct eth_end_agg_rx_cqe *cqe,
468                            u16 cqe_idx)
469 {
470         struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
471         struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
472         u8 pad = tpa_info->placement_offset;
473         u16 len = tpa_info->len_on_bd;
474         struct sk_buff *skb = rx_buf->skb;
475         /* alloc new skb */
476         struct sk_buff *new_skb;
477         u8 old_tpa_state = tpa_info->tpa_state;
478
479         tpa_info->tpa_state = BNX2X_TPA_STOP;
480
481         /* If there was an error during the handling of the TPA_START -
482          * drop this aggregation.
483          */
484         if (old_tpa_state == BNX2X_TPA_ERROR)
485                 goto drop;
486
487         /* Try to allocate the new skb */
488         new_skb = netdev_alloc_skb(bp->dev, fp->rx_buf_size);
489
490         /* Unmap skb in the pool anyway, as we are going to change
491            pool entry status to BNX2X_TPA_STOP even if new skb allocation
492            fails. */
493         dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
494                          fp->rx_buf_size, DMA_FROM_DEVICE);
495
496         if (likely(new_skb)) {
497                 prefetch(skb);
498                 prefetch(((char *)(skb)) + L1_CACHE_BYTES);
499
500 #ifdef BNX2X_STOP_ON_ERROR
501                 if (pad + len > fp->rx_buf_size) {
502                         BNX2X_ERR("skb_put is about to fail...  "
503                                   "pad %d  len %d  rx_buf_size %d\n",
504                                   pad, len, fp->rx_buf_size);
505                         bnx2x_panic();
506                         return;
507                 }
508 #endif
509
510                 skb_reserve(skb, pad);
511                 skb_put(skb, len);
512
513                 skb->protocol = eth_type_trans(skb, bp->dev);
514                 skb->ip_summed = CHECKSUM_UNNECESSARY;
515
516                 if (!bnx2x_fill_frag_skb(bp, fp, queue, skb, cqe, cqe_idx)) {
517                         if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
518                                 __vlan_hwaccel_put_tag(skb, tpa_info->vlan_tag);
519                         napi_gro_receive(&fp->napi, skb);
520                 } else {
521                         DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
522                            " - dropping packet!\n");
523                         dev_kfree_skb_any(skb);
524                 }
525
526
527                 /* put new skb in bin */
528                 rx_buf->skb = new_skb;
529
530                 return;
531         }
532
533 drop:
534         /* drop the packet and keep the buffer in the bin */
535         DP(NETIF_MSG_RX_STATUS,
536            "Failed to allocate or map a new skb - dropping packet!\n");
537         fp->eth_q_stats.rx_skb_alloc_failed++;
538 }
539
540 /* Set Toeplitz hash value in the skb using the value from the
541  * CQE (calculated by HW).
542  */
543 static inline void bnx2x_set_skb_rxhash(struct bnx2x *bp, union eth_rx_cqe *cqe,
544                                         struct sk_buff *skb)
545 {
546         /* Set Toeplitz hash from CQE */
547         if ((bp->dev->features & NETIF_F_RXHASH) &&
548             (cqe->fast_path_cqe.status_flags &
549              ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
550                 skb->rxhash =
551                 le32_to_cpu(cqe->fast_path_cqe.rss_hash_result);
552 }
553
554 static void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
555                                 struct bnx2x_fastpath *fp)
556 {
557         /* Do nothing if no IP/L4 csum validation was done */
558
559         if (cqe->fast_path_cqe.status_flags &
560             (ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG |
561              ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG))
562                 return;
563
564         /* If both IP/L4 validation were done, check if an error was found. */
565
566         if (cqe->fast_path_cqe.type_error_flags &
567             (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
568              ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
569                 fp->eth_q_stats.hw_csum_err++;
570         else
571                 skb->ip_summed = CHECKSUM_UNNECESSARY;
572 }
573
574 int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
575 {
576         struct bnx2x *bp = fp->bp;
577         u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
578         u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
579         int rx_pkt = 0;
580
581 #ifdef BNX2X_STOP_ON_ERROR
582         if (unlikely(bp->panic))
583                 return 0;
584 #endif
585
586         /* The CQ "next element" is the same size as a regular element,
587            that's why it's ok here */
588         hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
589         if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
590                 hw_comp_cons++;
591
592         bd_cons = fp->rx_bd_cons;
593         bd_prod = fp->rx_bd_prod;
594         bd_prod_fw = bd_prod;
595         sw_comp_cons = fp->rx_comp_cons;
596         sw_comp_prod = fp->rx_comp_prod;
597
598         /* Memory barrier necessary as speculative reads of the rx
599          * buffer can be ahead of the index in the status block
600          */
601         rmb();
602
603         DP(NETIF_MSG_RX_STATUS,
604            "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
605            fp->index, hw_comp_cons, sw_comp_cons);
606
607         while (sw_comp_cons != hw_comp_cons) {
608                 struct sw_rx_bd *rx_buf = NULL;
609                 struct sk_buff *skb;
610                 union eth_rx_cqe *cqe;
611                 struct eth_fast_path_rx_cqe *cqe_fp;
612                 u8 cqe_fp_flags;
613                 enum eth_rx_cqe_type cqe_fp_type;
614                 u16 len, pad;
615
616 #ifdef BNX2X_STOP_ON_ERROR
617                 if (unlikely(bp->panic))
618                         return 0;
619 #endif
620
621                 comp_ring_cons = RCQ_BD(sw_comp_cons);
622                 bd_prod = RX_BD(bd_prod);
623                 bd_cons = RX_BD(bd_cons);
624
625                 /* Prefetch the page containing the BD descriptor
626                    at the producer's index. It will be needed when a new skb is
627                    allocated */
628                 prefetch((void *)(PAGE_ALIGN((unsigned long)
629                                              (&fp->rx_desc_ring[bd_prod])) -
630                                   PAGE_SIZE + 1));
631
632                 cqe = &fp->rx_comp_ring[comp_ring_cons];
633                 cqe_fp = &cqe->fast_path_cqe;
634                 cqe_fp_flags = cqe_fp->type_error_flags;
635                 cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
636
637                 DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
638                    "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
639                    cqe_fp_flags, cqe_fp->status_flags,
640                    le32_to_cpu(cqe_fp->rss_hash_result),
641                    le16_to_cpu(cqe_fp->vlan_tag), le16_to_cpu(cqe_fp->pkt_len));
642
643                 /* is this a slowpath msg? */
644                 if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
645                         bnx2x_sp_event(fp, cqe);
646                         goto next_cqe;
647
648                 /* this is an rx packet */
649                 } else {
650                         rx_buf = &fp->rx_buf_ring[bd_cons];
651                         skb = rx_buf->skb;
652                         prefetch(skb);
653
654                         if (!CQE_TYPE_FAST(cqe_fp_type)) {
655 #ifdef BNX2X_STOP_ON_ERROR
656                                 /* sanity check */
657                                 if (fp->disable_tpa &&
658                                     (CQE_TYPE_START(cqe_fp_type) ||
659                                      CQE_TYPE_STOP(cqe_fp_type)))
660                                         BNX2X_ERR("START/STOP packet while "
661                                                   "disable_tpa type %x\n",
662                                                   CQE_TYPE(cqe_fp_type));
663 #endif
664
665                                 if (CQE_TYPE_START(cqe_fp_type)) {
666                                         u16 queue = cqe_fp->queue_index;
667                                         DP(NETIF_MSG_RX_STATUS,
668                                            "calling tpa_start on queue %d\n",
669                                            queue);
670
671                                         bnx2x_tpa_start(fp, queue, skb,
672                                                         bd_cons, bd_prod,
673                                                         cqe_fp);
674
675                                         /* Set Toeplitz hash for LRO skb */
676                                         bnx2x_set_skb_rxhash(bp, cqe, skb);
677
678                                         goto next_rx;
679
680                                 } else {
681                                         u16 queue =
682                                                 cqe->end_agg_cqe.queue_index;
683                                         DP(NETIF_MSG_RX_STATUS,
684                                            "calling tpa_stop on queue %d\n",
685                                            queue);
686
687                                         bnx2x_tpa_stop(bp, fp, queue,
688                                                        &cqe->end_agg_cqe,
689                                                        comp_ring_cons);
690 #ifdef BNX2X_STOP_ON_ERROR
691                                         if (bp->panic)
692                                                 return 0;
693 #endif
694
695                                         bnx2x_update_sge_prod(fp, cqe_fp);
696                                         goto next_cqe;
697                                 }
698                         }
699                         /* non TPA */
700                         len = le16_to_cpu(cqe_fp->pkt_len);
701                         pad = cqe_fp->placement_offset;
702                         dma_sync_single_for_cpu(&bp->pdev->dev,
703                                         dma_unmap_addr(rx_buf, mapping),
704                                                        pad + RX_COPY_THRESH,
705                                                        DMA_FROM_DEVICE);
706                         prefetch(((char *)(skb)) + L1_CACHE_BYTES);
707
708                         /* is this an error packet? */
709                         if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
710                                 DP(NETIF_MSG_RX_ERR,
711                                    "ERROR  flags %x  rx packet %u\n",
712                                    cqe_fp_flags, sw_comp_cons);
713                                 fp->eth_q_stats.rx_err_discard_pkt++;
714                                 goto reuse_rx;
715                         }
716
717                         /* Since we don't have a jumbo ring
718                          * copy small packets if mtu > 1500
719                          */
720                         if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
721                             (len <= RX_COPY_THRESH)) {
722                                 struct sk_buff *new_skb;
723
724                                 new_skb = netdev_alloc_skb(bp->dev, len + pad);
725                                 if (new_skb == NULL) {
726                                         DP(NETIF_MSG_RX_ERR,
727                                            "ERROR  packet dropped "
728                                            "because of alloc failure\n");
729                                         fp->eth_q_stats.rx_skb_alloc_failed++;
730                                         goto reuse_rx;
731                                 }
732
733                                 /* aligned copy */
734                                 skb_copy_from_linear_data_offset(skb, pad,
735                                                     new_skb->data + pad, len);
736                                 skb_reserve(new_skb, pad);
737                                 skb_put(new_skb, len);
738
739                                 bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod);
740
741                                 skb = new_skb;
742
743                         } else
744                         if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
745                                 dma_unmap_single(&bp->pdev->dev,
746                                         dma_unmap_addr(rx_buf, mapping),
747                                                  fp->rx_buf_size,
748                                                  DMA_FROM_DEVICE);
749                                 skb_reserve(skb, pad);
750                                 skb_put(skb, len);
751
752                         } else {
753                                 DP(NETIF_MSG_RX_ERR,
754                                    "ERROR  packet dropped because "
755                                    "of alloc failure\n");
756                                 fp->eth_q_stats.rx_skb_alloc_failed++;
757 reuse_rx:
758                                 bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod);
759                                 goto next_rx;
760                         }
761
762                         skb->protocol = eth_type_trans(skb, bp->dev);
763
764                         /* Set Toeplitz hash for a non-LRO skb */
765                         bnx2x_set_skb_rxhash(bp, cqe, skb);
766
767                         skb_checksum_none_assert(skb);
768
769                         if (bp->dev->features & NETIF_F_RXCSUM)
770                                 bnx2x_csum_validate(skb, cqe, fp);
771
772                 }
773
774                 skb_record_rx_queue(skb, fp->index);
775
776                 if (le16_to_cpu(cqe_fp->pars_flags.flags) &
777                     PARSING_FLAGS_VLAN)
778                         __vlan_hwaccel_put_tag(skb,
779                                                le16_to_cpu(cqe_fp->vlan_tag));
780                 napi_gro_receive(&fp->napi, skb);
781
782
783 next_rx:
784                 rx_buf->skb = NULL;
785
786                 bd_cons = NEXT_RX_IDX(bd_cons);
787                 bd_prod = NEXT_RX_IDX(bd_prod);
788                 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
789                 rx_pkt++;
790 next_cqe:
791                 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
792                 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
793
794                 if (rx_pkt == budget)
795                         break;
796         } /* while */
797
798         fp->rx_bd_cons = bd_cons;
799         fp->rx_bd_prod = bd_prod_fw;
800         fp->rx_comp_cons = sw_comp_cons;
801         fp->rx_comp_prod = sw_comp_prod;
802
803         /* Update producers */
804         bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
805                              fp->rx_sge_prod);
806
807         fp->rx_pkt += rx_pkt;
808         fp->rx_calls++;
809
810         return rx_pkt;
811 }
812
813 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
814 {
815         struct bnx2x_fastpath *fp = fp_cookie;
816         struct bnx2x *bp = fp->bp;
817         u8 cos;
818
819         DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB "
820                          "[fp %d fw_sd %d igusb %d]\n",
821            fp->index, fp->fw_sb_id, fp->igu_sb_id);
822         bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
823
824 #ifdef BNX2X_STOP_ON_ERROR
825         if (unlikely(bp->panic))
826                 return IRQ_HANDLED;
827 #endif
828
829         /* Handle Rx and Tx according to MSI-X vector */
830         prefetch(fp->rx_cons_sb);
831
832         for_each_cos_in_tx_queue(fp, cos)
833                 prefetch(fp->txdata[cos].tx_cons_sb);
834
835         prefetch(&fp->sb_running_index[SM_RX_ID]);
836         napi_schedule(&bnx2x_fp(bp, fp->index, napi));
837
838         return IRQ_HANDLED;
839 }
840
841 /* HW Lock for shared dual port PHYs */
842 void bnx2x_acquire_phy_lock(struct bnx2x *bp)
843 {
844         mutex_lock(&bp->port.phy_mutex);
845
846         if (bp->port.need_hw_lock)
847                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
848 }
849
850 void bnx2x_release_phy_lock(struct bnx2x *bp)
851 {
852         if (bp->port.need_hw_lock)
853                 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
854
855         mutex_unlock(&bp->port.phy_mutex);
856 }
857
858 /* calculates MF speed according to the current line speed and MF configuration */
859 u16 bnx2x_get_mf_speed(struct bnx2x *bp)
860 {
861         u16 line_speed = bp->link_vars.line_speed;
862         if (IS_MF(bp)) {
863                 u16 maxCfg = bnx2x_extract_max_cfg(bp,
864                                                    bp->mf_config[BP_VN(bp)]);
865
866                 /* Calculate the current MAX line speed limit for the MF
867                  * devices
868                  */
869                 if (IS_MF_SI(bp))
870                         line_speed = (line_speed * maxCfg) / 100;
871                 else { /* SD mode */
872                         u16 vn_max_rate = maxCfg * 100;
873
874                         if (vn_max_rate < line_speed)
875                                 line_speed = vn_max_rate;
876                 }
877         }
878
879         return line_speed;
880 }
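
/* Illustrative example (assumed figures): with a 10000 Mbps link and a
 * maxCfg value of 30, SI mode treats maxCfg as a percentage and would
 * report 10000 * 30 / 100 = 3000 Mbps, while SD mode treats it as a cap
 * in units of 100 Mbps and would report min(10000, 30 * 100) = 3000 Mbps.
 */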
881
882 /**
883  * bnx2x_fill_report_data - fill link report data to report
884  *
885  * @bp:         driver handle
886  * @data:       link state to update
887  *
888  * It uses non-atomic bit operations because it is called under the mutex.
889  */
890 static inline void bnx2x_fill_report_data(struct bnx2x *bp,
891                                           struct bnx2x_link_report_data *data)
892 {
893         u16 line_speed = bnx2x_get_mf_speed(bp);
894
895         memset(data, 0, sizeof(*data));
896
897         /* Fill the report data: effective line speed */
898         data->line_speed = line_speed;
899
900         /* Link is down */
901         if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
902                 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
903                           &data->link_report_flags);
904
905         /* Full DUPLEX */
906         if (bp->link_vars.duplex == DUPLEX_FULL)
907                 __set_bit(BNX2X_LINK_REPORT_FD, &data->link_report_flags);
908
909         /* Rx Flow Control is ON */
910         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
911                 __set_bit(BNX2X_LINK_REPORT_RX_FC_ON, &data->link_report_flags);
912
913         /* Tx Flow Control is ON */
914         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
915                 __set_bit(BNX2X_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
916 }
917
918 /**
919  * bnx2x_link_report - report link status to OS.
920  *
921  * @bp:         driver handle
922  *
923  * Calls the __bnx2x_link_report() under the same locking scheme
924  * as a link/PHY state managing code to ensure a consistent link
925  * reporting.
926  */
927
928 void bnx2x_link_report(struct bnx2x *bp)
929 {
930         bnx2x_acquire_phy_lock(bp);
931         __bnx2x_link_report(bp);
932         bnx2x_release_phy_lock(bp);
933 }
934
935 /**
936  * __bnx2x_link_report - report link status to OS.
937  *
938  * @bp:         driver handle
939  *
940  * Non-atomic implementation.
941  * Should be called under the phy_lock.
942  */
943 void __bnx2x_link_report(struct bnx2x *bp)
944 {
945         struct bnx2x_link_report_data cur_data;
946
947         /* reread mf_cfg */
948         if (!CHIP_IS_E1(bp))
949                 bnx2x_read_mf_cfg(bp);
950
951         /* Read the current link report info */
952         bnx2x_fill_report_data(bp, &cur_data);
953
954         /* Don't report link down or exactly the same link status twice */
955         if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
956             (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
957                       &bp->last_reported_link.link_report_flags) &&
958              test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
959                       &cur_data.link_report_flags)))
960                 return;
961
962         bp->link_cnt++;
963
964         /* We are going to report new link parameters now -
965          * remember the current data for the next time.
966          */
967         memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));
968
969         if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
970                      &cur_data.link_report_flags)) {
971                 netif_carrier_off(bp->dev);
972                 netdev_err(bp->dev, "NIC Link is Down\n");
973                 return;
974         } else {
975                 const char *duplex;
976                 const char *flow;
977
978                 netif_carrier_on(bp->dev);
979
980                 if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
981                                        &cur_data.link_report_flags))
982                         duplex = "full";
983                 else
984                         duplex = "half";
985
986                 /* Handle the FC at the end so that only these flags can
987                  * possibly be set. This way we can easily check whether any
988                  * FC is enabled.
989                  */
990                 if (cur_data.link_report_flags) {
991                         if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
992                                      &cur_data.link_report_flags)) {
993                                 if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
994                                      &cur_data.link_report_flags))
995                                         flow = "ON - receive & transmit";
996                                 else
997                                         flow = "ON - receive";
998                         } else {
999                                 flow = "ON - transmit";
1000                         }
1001                 } else {
1002                         flow = "none";
1003                 }
1004                 netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
1005                             cur_data.line_speed, duplex, flow);
1006         }
1007 }
1008
1009 void bnx2x_init_rx_rings(struct bnx2x *bp)
1010 {
1011         int func = BP_FUNC(bp);
1012         u16 ring_prod;
1013         int i, j;
1014
1015         /* Allocate TPA resources */
1016         for_each_rx_queue(bp, j) {
1017                 struct bnx2x_fastpath *fp = &bp->fp[j];
1018
1019                 DP(NETIF_MSG_IFUP,
1020                    "mtu %d  rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
1021
1022                 if (!fp->disable_tpa) {
1023                         /* Fill the per-aggregation pool */
1024                         for (i = 0; i < MAX_AGG_QS(bp); i++) {
1025                                 struct bnx2x_agg_info *tpa_info =
1026                                         &fp->tpa_info[i];
1027                                 struct sw_rx_bd *first_buf =
1028                                         &tpa_info->first_buf;
1029
1030                                 first_buf->skb = netdev_alloc_skb(bp->dev,
1031                                                        fp->rx_buf_size);
1032                                 if (!first_buf->skb) {
1033                                         BNX2X_ERR("Failed to allocate TPA "
1034                                                   "skb pool for queue[%d] - "
1035                                                   "disabling TPA on this "
1036                                                   "queue!\n", j);
1037                                         bnx2x_free_tpa_pool(bp, fp, i);
1038                                         fp->disable_tpa = 1;
1039                                         break;
1040                                 }
1041                                 dma_unmap_addr_set(first_buf, mapping, 0);
1042                                 tpa_info->tpa_state = BNX2X_TPA_STOP;
1043                         }
1044
1045                         /* "next page" elements initialization */
1046                         bnx2x_set_next_page_sgl(fp);
1047
1048                         /* set SGEs bit mask */
1049                         bnx2x_init_sge_ring_bit_mask(fp);
1050
1051                         /* Allocate SGEs and initialize the ring elements */
1052                         for (i = 0, ring_prod = 0;
1053                              i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
1054
1055                                 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
1056                                         BNX2X_ERR("was only able to allocate "
1057                                                   "%d rx sges\n", i);
1058                                         BNX2X_ERR("disabling TPA for "
1059                                                   "queue[%d]\n", j);
1060                                         /* Cleanup already allocated elements */
1061                                         bnx2x_free_rx_sge_range(bp, fp,
1062                                                                 ring_prod);
1063                                         bnx2x_free_tpa_pool(bp, fp,
1064                                                             MAX_AGG_QS(bp));
1065                                         fp->disable_tpa = 1;
1066                                         ring_prod = 0;
1067                                         break;
1068                                 }
1069                                 ring_prod = NEXT_SGE_IDX(ring_prod);
1070                         }
1071
1072                         fp->rx_sge_prod = ring_prod;
1073                 }
1074         }
1075
1076         for_each_rx_queue(bp, j) {
1077                 struct bnx2x_fastpath *fp = &bp->fp[j];
1078
1079                 fp->rx_bd_cons = 0;
1080
1081                 /* Activate BD ring */
1082                 /* Warning!
1083                  * this will generate an interrupt (to the TSTORM) and
1084                  * must only be done after the chip is initialized
1085                  */
1086                 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1087                                      fp->rx_sge_prod);
1088
1089                 if (j != 0)
1090                         continue;
1091
1092                 if (CHIP_IS_E1(bp)) {
1093                         REG_WR(bp, BAR_USTRORM_INTMEM +
1094                                USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
1095                                U64_LO(fp->rx_comp_mapping));
1096                         REG_WR(bp, BAR_USTRORM_INTMEM +
1097                                USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
1098                                U64_HI(fp->rx_comp_mapping));
1099                 }
1100         }
1101 }
1102
1103 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
1104 {
1105         int i;
1106         u8 cos;
1107
1108         for_each_tx_queue(bp, i) {
1109                 struct bnx2x_fastpath *fp = &bp->fp[i];
1110                 for_each_cos_in_tx_queue(fp, cos) {
1111                         struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
1112
1113                         u16 bd_cons = txdata->tx_bd_cons;
1114                         u16 sw_prod = txdata->tx_pkt_prod;
1115                         u16 sw_cons = txdata->tx_pkt_cons;
1116
1117                         while (sw_cons != sw_prod) {
1118                                 bd_cons = bnx2x_free_tx_pkt(bp, txdata,
1119                                                             TX_BD(sw_cons));
1120                                 sw_cons++;
1121                         }
1122                 }
1123         }
1124 }
1125
1126 static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
1127 {
1128         struct bnx2x *bp = fp->bp;
1129         int i;
1130
1131         /* ring wasn't allocated */
1132         if (fp->rx_buf_ring == NULL)
1133                 return;
1134
1135         for (i = 0; i < NUM_RX_BD; i++) {
1136                 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
1137                 struct sk_buff *skb = rx_buf->skb;
1138
1139                 if (skb == NULL)
1140                         continue;
1141                 dma_unmap_single(&bp->pdev->dev,
1142                                  dma_unmap_addr(rx_buf, mapping),
1143                                  fp->rx_buf_size, DMA_FROM_DEVICE);
1144
1145                 rx_buf->skb = NULL;
1146                 dev_kfree_skb(skb);
1147         }
1148 }
1149
1150 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1151 {
1152         int j;
1153
1154         for_each_rx_queue(bp, j) {
1155                 struct bnx2x_fastpath *fp = &bp->fp[j];
1156
1157                 bnx2x_free_rx_bds(fp);
1158
1159                 if (!fp->disable_tpa)
1160                         bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
1161         }
1162 }
1163
1164 void bnx2x_free_skbs(struct bnx2x *bp)
1165 {
1166         bnx2x_free_tx_skbs(bp);
1167         bnx2x_free_rx_skbs(bp);
1168 }
1169
1170 void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
1171 {
1172         /* load old values */
1173         u32 mf_cfg = bp->mf_config[BP_VN(bp)];
1174
1175         if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
1176                 /* leave all but MAX value */
1177                 mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
1178
1179                 /* set new MAX value */
1180                 mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
1181                                 & FUNC_MF_CFG_MAX_BW_MASK;
1182
1183                 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
1184         }
1185 }
1186
1187 /**
1188  * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
1189  *
1190  * @bp:         driver handle
1191  * @nvecs:      number of vectors to be released
1192  */
1193 static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
1194 {
1195         int i, offset = 0;
1196
1197         if (nvecs == offset)
1198                 return;
1199         free_irq(bp->msix_table[offset].vector, bp->dev);
1200         DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
1201            bp->msix_table[offset].vector);
1202         offset++;
1203 #ifdef BCM_CNIC
1204         if (nvecs == offset)
1205                 return;
1206         offset++;
1207 #endif
1208
1209         for_each_eth_queue(bp, i) {
1210                 if (nvecs == offset)
1211                         return;
1212                 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d "
1213                    "irq\n", i, bp->msix_table[offset].vector);
1214
1215                 free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
1216         }
1217 }
1218
1219 void bnx2x_free_irq(struct bnx2x *bp)
1220 {
1221         if (bp->flags & USING_MSIX_FLAG)
1222                 bnx2x_free_msix_irqs(bp, BNX2X_NUM_ETH_QUEUES(bp) +
1223                                      CNIC_PRESENT + 1);
1224         else if (bp->flags & USING_MSI_FLAG)
1225                 free_irq(bp->pdev->irq, bp->dev);
1226         else
1227                 free_irq(bp->pdev->irq, bp->dev);
1228 }
1229
1230 int bnx2x_enable_msix(struct bnx2x *bp)
1231 {
1232         int msix_vec = 0, i, rc, req_cnt;
1233
1234         bp->msix_table[msix_vec].entry = msix_vec;
1235         DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n",
1236            bp->msix_table[0].entry);
1237         msix_vec++;
1238
1239 #ifdef BCM_CNIC
1240         bp->msix_table[msix_vec].entry = msix_vec;
1241         DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d (CNIC)\n",
1242            bp->msix_table[msix_vec].entry, bp->msix_table[msix_vec].entry);
1243         msix_vec++;
1244 #endif
1245         /* We need separate vectors for ETH queues only (not FCoE) */
1246         for_each_eth_queue(bp, i) {
1247                 bp->msix_table[msix_vec].entry = msix_vec;
1248                 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
1249                    "(fastpath #%u)\n", msix_vec, msix_vec, i);
1250                 msix_vec++;
1251         }
1252
1253         req_cnt = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_PRESENT + 1;
1254
1255         rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], req_cnt);
1256
1257         /*
1258          * reconfigure number of tx/rx queues according to available
1259          * MSI-X vectors
1260          */
1261         if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
1262                 /* how many fewer vectors will we have? */
1263                 int diff = req_cnt - rc;
1264
1265                 DP(NETIF_MSG_IFUP,
1266                    "Trying to use less MSI-X vectors: %d\n", rc);
1267
1268                 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
1269
1270                 if (rc) {
1271                         DP(NETIF_MSG_IFUP,
1272                            "MSI-X is not attainable  rc %d\n", rc);
1273                         return rc;
1274                 }
1275                 /*
1276                  * decrease number of queues by number of unallocated entries
1277                  */
1278                 bp->num_queues -= diff;
1279
1280                 DP(NETIF_MSG_IFUP, "New queue configuration set: %d\n",
1281                                   bp->num_queues);
1282         } else if (rc) {
1283                 /* fall back to INTx if not enough memory */
1284                 if (rc == -ENOMEM)
1285                         bp->flags |= DISABLE_MSI_FLAG;
1286                 DP(NETIF_MSG_IFUP, "MSI-X is not attainable  rc %d\n", rc);
1287                 return rc;
1288         }
1289
1290         bp->flags |= USING_MSIX_FLAG;
1291
1292         return 0;
1293 }
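
/* Illustrative example (assumed figures, with CNIC support compiled in):
 * eight ETH queues plus the CNIC and slowpath vectors give
 * req_cnt = 8 + 1 + 1 = 10.  If the first pci_enable_msix() call reports
 * that only 6 vectors are available (rc = 6), the retry above requests 6
 * and, on success, bp->num_queues is reduced by diff = 10 - 6 = 4.
 */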
1294
1295 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1296 {
1297         int i, rc, offset = 0;
1298
1299         rc = request_irq(bp->msix_table[offset++].vector,
1300                          bnx2x_msix_sp_int, 0,
1301                          bp->dev->name, bp->dev);
1302         if (rc) {
1303                 BNX2X_ERR("request sp irq failed\n");
1304                 return -EBUSY;
1305         }
1306
1307 #ifdef BCM_CNIC
1308         offset++;
1309 #endif
1310         for_each_eth_queue(bp, i) {
1311                 struct bnx2x_fastpath *fp = &bp->fp[i];
1312                 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1313                          bp->dev->name, i);
1314
1315                 rc = request_irq(bp->msix_table[offset].vector,
1316                                  bnx2x_msix_fp_int, 0, fp->name, fp);
1317                 if (rc) {
1318                         BNX2X_ERR("request fp #%d irq (%d) failed  rc %d\n", i,
1319                               bp->msix_table[offset].vector, rc);
1320                         bnx2x_free_msix_irqs(bp, offset);
1321                         return -EBUSY;
1322                 }
1323
1324                 offset++;
1325         }
1326
1327         i = BNX2X_NUM_ETH_QUEUES(bp);
1328         offset = 1 + CNIC_PRESENT;
1329         netdev_info(bp->dev, "using MSI-X  IRQs: sp %d  fp[%d] %d"
1330                " ... fp[%d] %d\n",
1331                bp->msix_table[0].vector,
1332                0, bp->msix_table[offset].vector,
1333                i - 1, bp->msix_table[offset + i - 1].vector);
1334
1335         return 0;
1336 }
1337
1338 int bnx2x_enable_msi(struct bnx2x *bp)
1339 {
1340         int rc;
1341
1342         rc = pci_enable_msi(bp->pdev);
1343         if (rc) {
1344                 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
1345                 return -1;
1346         }
1347         bp->flags |= USING_MSI_FLAG;
1348
1349         return 0;
1350 }
1351
1352 static int bnx2x_req_irq(struct bnx2x *bp)
1353 {
1354         unsigned long flags;
1355         int rc;
1356
1357         if (bp->flags & USING_MSI_FLAG)
1358                 flags = 0;
1359         else
1360                 flags = IRQF_SHARED;
1361
1362         rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
1363                          bp->dev->name, bp->dev);
1364         return rc;
1365 }
1366
1367 static inline int bnx2x_setup_irqs(struct bnx2x *bp)
1368 {
1369         int rc = 0;
1370         if (bp->flags & USING_MSIX_FLAG) {
1371                 rc = bnx2x_req_msix_irqs(bp);
1372                 if (rc)
1373                         return rc;
1374         } else {
1375                 bnx2x_ack_int(bp);
1376                 rc = bnx2x_req_irq(bp);
1377                 if (rc) {
1378                         BNX2X_ERR("IRQ request failed  rc %d, aborting\n", rc);
1379                         return rc;
1380                 }
1381                 if (bp->flags & USING_MSI_FLAG) {
1382                         bp->dev->irq = bp->pdev->irq;
1383                         netdev_info(bp->dev, "using MSI  IRQ %d\n",
1384                                bp->pdev->irq);
1385                 }
1386         }
1387
1388         return 0;
1389 }
1390
1391 static inline void bnx2x_napi_enable(struct bnx2x *bp)
1392 {
1393         int i;
1394
1395         for_each_rx_queue(bp, i)
1396                 napi_enable(&bnx2x_fp(bp, i, napi));
1397 }
1398
1399 static inline void bnx2x_napi_disable(struct bnx2x *bp)
1400 {
1401         int i;
1402
1403         for_each_rx_queue(bp, i)
1404                 napi_disable(&bnx2x_fp(bp, i, napi));
1405 }
1406
1407 void bnx2x_netif_start(struct bnx2x *bp)
1408 {
1409         if (netif_running(bp->dev)) {
1410                 bnx2x_napi_enable(bp);
1411                 bnx2x_int_enable(bp);
1412                 if (bp->state == BNX2X_STATE_OPEN)
1413                         netif_tx_wake_all_queues(bp->dev);
1414         }
1415 }
1416
1417 void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1418 {
1419         bnx2x_int_disable_sync(bp, disable_hw);
1420         bnx2x_napi_disable(bp);
1421 }
1422
1423 u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
1424 {
1425         struct bnx2x *bp = netdev_priv(dev);
1426
1427 #ifdef BCM_CNIC
1428         if (!NO_FCOE(bp)) {
1429                 struct ethhdr *hdr = (struct ethhdr *)skb->data;
1430                 u16 ether_type = ntohs(hdr->h_proto);
1431
1432                 /* Skip VLAN tag if present */
1433                 if (ether_type == ETH_P_8021Q) {
1434                         struct vlan_ethhdr *vhdr =
1435                                 (struct vlan_ethhdr *)skb->data;
1436
1437                         ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1438                 }
1439
1440                 /* If ethertype is FCoE or FIP - use FCoE ring */
1441                 if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
1442                         return bnx2x_fcoe_tx(bp, txq_index);
1443         }
1444 #endif
1445         /* select a non-FCoE queue */
1446         return __skb_tx_hash(dev, skb, BNX2X_NUM_ETH_QUEUES(bp));
1447 }
1448
1449 void bnx2x_set_num_queues(struct bnx2x *bp)
1450 {
1451         switch (bp->multi_mode) {
1452         case ETH_RSS_MODE_DISABLED:
1453                 bp->num_queues = 1;
1454                 break;
1455         case ETH_RSS_MODE_REGULAR:
1456                 bp->num_queues = bnx2x_calc_num_queues(bp);
1457                 break;
1458
1459         default:
1460                 bp->num_queues = 1;
1461                 break;
1462         }
1463
1464         /* Add special queues */
1465         bp->num_queues += NON_ETH_CONTEXT_USE;
1466 }
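/*
 * Illustrative numbers only (not taken from a specific board): with
 * ETH_RSS_MODE_REGULAR, if bnx2x_calc_num_queues() returns 8 and
 * NON_ETH_CONTEXT_USE is 1 (e.g. an FCoE L2 context is in use), the function
 * above ends up with bp->num_queues = 8 + 1 = 9; with ETH_RSS_MODE_DISABLED
 * it would be 1 + 1 = 2.
 */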
1467
1468 /**
1469  * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
1470  *
1471  * @bp:         Driver handle
1472  *
1473  * We currently support at most 16 Tx queues for each CoS, thus we will
1474  * allocate a multiple of 16 for the ETH L2 rings, according to the value of
1475  * bp->max_cos.
1476  *
1477  * If there is an FCoE L2 queue the appropriate Tx queue will have the next
1478  * index after all ETH L2 indices.
1479  *
1480  * If the actual number of Tx queues (for each CoS) is less than 16 then there
1481  * will be holes at the end of each group of 16 ETH L2 indices (0..15,
1482  * 16..31, ...) with indices that are not coupled with any real Tx queue.
1483  *
1484  * The proper configuration of skb->queue_mapping is handled by
1485  * bnx2x_select_queue() and __skb_tx_hash().
1486  *
1487  * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
1488  * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
1489  */
1490 static inline int bnx2x_set_real_num_queues(struct bnx2x *bp)
1491 {
1492         int rc, tx, rx;
1493
1494         tx = MAX_TXQS_PER_COS * bp->max_cos;
1495         rx = BNX2X_NUM_ETH_QUEUES(bp);
1496
1497 /* account for fcoe queue */
1498 #ifdef BCM_CNIC
1499         if (!NO_FCOE(bp)) {
1500                 rx += FCOE_PRESENT;
1501                 tx += FCOE_PRESENT;
1502         }
1503 #endif
1504
1505         rc = netif_set_real_num_tx_queues(bp->dev, tx);
1506         if (rc) {
1507                 BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
1508                 return rc;
1509         }
1510         rc = netif_set_real_num_rx_queues(bp->dev, rx);
1511         if (rc) {
1512                 BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
1513                 return rc;
1514         }
1515
1516         DP(NETIF_MSG_DRV, "Setting real num queues to (tx, rx) (%d, %d)\n",
1517                           tx, rx);
1518
1519         return rc;
1520 }
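/*
 * A worked example of the arithmetic above, assuming MAX_TXQS_PER_COS is 16
 * (as the kernel-doc suggests) and FCOE_PRESENT is 1: with bp->max_cos = 2
 * and 8 ETH L2 queues plus an FCoE L2 queue,
 *
 *	tx = 16 * 2 + 1 = 33	(netdev real_num_tx_queues)
 *	rx = 8 + 1 = 9		(netdev real_num_rx_queues)
 *
 * The hole indices between the 8 real Tx queues of each CoS and index 15 are
 * never selected, since bnx2x_select_queue()/__skb_tx_hash() only hash over
 * the real queues.
 */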
1521
1522 static inline void bnx2x_set_rx_buf_size(struct bnx2x *bp)
1523 {
1524         int i;
1525
1526         for_each_queue(bp, i) {
1527                 struct bnx2x_fastpath *fp = &bp->fp[i];
1528
1529                 /* Always use a mini-jumbo MTU for the FCoE L2 ring */
1530                 if (IS_FCOE_IDX(i))
1531                         /*
1532                          * Although no IP frames are expected to arrive on
1533                          * this ring, we still want to add an
1534                          * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
1535                          * overrun attack.
1536                          */
1537                         fp->rx_buf_size =
1538                                 BNX2X_FCOE_MINI_JUMBO_MTU + ETH_OVREHEAD +
1539                                 BNX2X_FW_RX_ALIGN + IP_HEADER_ALIGNMENT_PADDING;
1540                 else
1541                         fp->rx_buf_size =
1542                                 bp->dev->mtu + ETH_OVREHEAD +
1543                                 BNX2X_FW_RX_ALIGN + IP_HEADER_ALIGNMENT_PADDING;
1544         }
1545 }
1546
1547 static inline int bnx2x_init_rss_pf(struct bnx2x *bp)
1548 {
1549         int i;
1550         u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0};
1551         u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
1552
1553         /*
1554          * Prepare the initial contents of the indirection table if RSS is
1555          * enabled
1556          */
1557         if (bp->multi_mode != ETH_RSS_MODE_DISABLED) {
1558                 for (i = 0; i < sizeof(ind_table); i++)
1559                         ind_table[i] =
1560                                 bp->fp->cl_id + (i % num_eth_queues);
1561         }
1562
1563         /*
1564          * For 57710 and 57711 SEARCHER configuration (rss_keys) is
1565          * per-port, so if explicit configuration is needed, do it only
1566          * for a PMF.
1567          *
1568          * For 57712 and newer on the other hand it's a per-function
1569          * configuration.
1570          */
1571         return bnx2x_config_rss_pf(bp, ind_table,
1572                                    bp->port.pmf || !CHIP_IS_E1x(bp));
1573 }
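/*
 * Illustrative indirection table fill (the cl_id value is hypothetical):
 * with 4 ETH queues and bp->fp->cl_id == 16, the loop above produces
 *
 *	ind_table[] = { 16, 17, 18, 19, 16, 17, 18, 19, ... }
 *
 * repeating over all T_ETH_INDIRECTION_TABLE_SIZE entries, so RSS hash
 * buckets are spread evenly across the ETH client IDs.
 */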
1574
1575 int bnx2x_config_rss_pf(struct bnx2x *bp, u8 *ind_table, bool config_hash)
1576 {
1577         struct bnx2x_config_rss_params params = {0};
1578         int i;
1579
1580         /* Although RSS is meaningless when there is a single HW queue, we
1581          * still need it enabled in order to have the HW Rx hash generated.
1582          *
1583          * if (!is_eth_multi(bp))
1584          *      bp->multi_mode = ETH_RSS_MODE_DISABLED;
1585          */
1586
1587         params.rss_obj = &bp->rss_conf_obj;
1588
1589         __set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
1590
1591         /* RSS mode */
1592         switch (bp->multi_mode) {
1593         case ETH_RSS_MODE_DISABLED:
1594                 __set_bit(BNX2X_RSS_MODE_DISABLED, &params.rss_flags);
1595                 break;
1596         case ETH_RSS_MODE_REGULAR:
1597                 __set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
1598                 break;
1599         case ETH_RSS_MODE_VLAN_PRI:
1600                 __set_bit(BNX2X_RSS_MODE_VLAN_PRI, &params.rss_flags);
1601                 break;
1602         case ETH_RSS_MODE_E1HOV_PRI:
1603                 __set_bit(BNX2X_RSS_MODE_E1HOV_PRI, &params.rss_flags);
1604                 break;
1605         case ETH_RSS_MODE_IP_DSCP:
1606                 __set_bit(BNX2X_RSS_MODE_IP_DSCP, &params.rss_flags);
1607                 break;
1608         default:
1609                 BNX2X_ERR("Unknown multi_mode: %d\n", bp->multi_mode);
1610                 return -EINVAL;
1611         }
1612
1613         /* If RSS is enabled */
1614         if (bp->multi_mode != ETH_RSS_MODE_DISABLED) {
1615                 /* RSS configuration */
1616                 __set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
1617                 __set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
1618                 __set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
1619                 __set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
1620
1621                 /* Hash bits */
1622                 params.rss_result_mask = MULTI_MASK;
1623
1624                 memcpy(params.ind_table, ind_table, sizeof(params.ind_table));
1625
1626                 if (config_hash) {
1627                         /* RSS keys */
1628                         for (i = 0; i < sizeof(params.rss_key) / 4; i++)
1629                                 params.rss_key[i] = random32();
1630
1631                         __set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
1632                 }
1633         }
1634
1635         return bnx2x_config_rss(bp, &params);
1636 }
1637
1638 static inline int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
1639 {
1640         struct bnx2x_func_state_params func_params = {0};
1641
1642         /* Prepare parameters for function state transitions */
1643         __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
1644
1645         func_params.f_obj = &bp->func_obj;
1646         func_params.cmd = BNX2X_F_CMD_HW_INIT;
1647
1648         func_params.params.hw_init.load_phase = load_code;
1649
1650         return bnx2x_func_state_change(bp, &func_params);
1651 }
1652
1653 /*
1654  * Cleans the objects that have internal lists without sending
1655  * ramrods. Should be run when interrupts are disabled.
1656  */
1657 static void bnx2x_squeeze_objects(struct bnx2x *bp)
1658 {
1659         int rc;
1660         unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
1661         struct bnx2x_mcast_ramrod_params rparam = {0};
1662         struct bnx2x_vlan_mac_obj *mac_obj = &bp->fp->mac_obj;
1663
1664         /***************** Cleanup MACs' object first *************************/
1665
1666         /* Wait for completion of the requested commands */
1667         __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
1668         /* Perform a dry cleanup */
1669         __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
1670
1671         /* Clean ETH primary MAC */
1672         __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
1673         rc = mac_obj->delete_all(bp, &bp->fp->mac_obj, &vlan_mac_flags,
1674                                  &ramrod_flags);
1675         if (rc != 0)
1676                 BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
1677
1678         /* Cleanup UC list */
1679         vlan_mac_flags = 0;
1680         __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
1681         rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
1682                                  &ramrod_flags);
1683         if (rc != 0)
1684                 BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
1685
1686         /***************** Now clean mcast object *****************************/
1687         rparam.mcast_obj = &bp->mcast_obj;
1688         __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
1689
1690         /* Add a DEL command... */
1691         rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
1692         if (rc < 0)
1693                 BNX2X_ERR("Failed to add a new DEL command to a multi-cast "
1694                           "object: %d\n", rc);
1695
1696         /* ...and wait until all pending commands are cleared */
1697         rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
1698         while (rc != 0) {
1699                 if (rc < 0) {
1700                         BNX2X_ERR("Failed to clean multi-cast object: %d\n",
1701                                   rc);
1702                         return;
1703                 }
1704
1705                 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
1706         }
1707 }
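/*
 * A minimal sketch of the CONT polling pattern used above, with the return
 * value semantics implied by the loop (> 0: commands still pending,
 * 0: object is clean, < 0: error):
 *
 *	do {
 *		rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
 *	} while (rc > 0);
 */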
1708
1709 #ifndef BNX2X_STOP_ON_ERROR
1710 #define LOAD_ERROR_EXIT(bp, label) \
1711         do { \
1712                 (bp)->state = BNX2X_STATE_ERROR; \
1713                 goto label; \
1714         } while (0)
1715 #else
1716 #define LOAD_ERROR_EXIT(bp, label) \
1717         do { \
1718                 (bp)->state = BNX2X_STATE_ERROR; \
1719                 (bp)->panic = 1; \
1720                 return -EBUSY; \
1721         } while (0)
1722 #endif
1723
1724 /* must be called with rtnl_lock */
1725 int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
1726 {
1727         int port = BP_PORT(bp);
1728         u32 load_code;
1729         int i, rc;
1730
1731 #ifdef BNX2X_STOP_ON_ERROR
1732         if (unlikely(bp->panic))
1733                 return -EPERM;
1734 #endif
1735
1736         bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
1737
1738         /* Set the initial link reported state to link down */
1739         bnx2x_acquire_phy_lock(bp);
1740         memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
1741         __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1742                 &bp->last_reported_link.link_report_flags);
1743         bnx2x_release_phy_lock(bp);
1744
1745         /* must be called before memory allocation and HW init */
1746         bnx2x_ilt_set_info(bp);
1747
1748         /*
1749          * Zero fastpath structures while preserving invariants that are allocated
1750          * only once (napi), as well as the fp index, max_cos and bp pointer.
1751          * Also set fp->disable_tpa.
1752          */
1753         for_each_queue(bp, i)
1754                 bnx2x_bz_fp(bp, i);
1755
1756
1757         /* Set the receive queues buffer size */
1758         bnx2x_set_rx_buf_size(bp);
1759
1760         if (bnx2x_alloc_mem(bp))
1761                 return -ENOMEM;
1762
1763         /* Since bnx2x_alloc_mem() may update
1764          * bp->num_queues, bnx2x_set_real_num_queues() should always
1765          * come after it.
1766          */
1767         rc = bnx2x_set_real_num_queues(bp);
1768         if (rc) {
1769                 BNX2X_ERR("Unable to set real_num_queues\n");
1770                 LOAD_ERROR_EXIT(bp, load_error0);
1771         }
1772
1773         /* Configure multi-CoS mappings in the kernel.
1774          * This configuration may be overridden by a multi-class queue
1775          * discipline or by a DCBX negotiation result.
1776          */
1777         bnx2x_setup_tc(bp->dev, bp->max_cos);
1778
1779         bnx2x_napi_enable(bp);
1780
1781         /* Send LOAD_REQUEST command to MCP
1782          * Returns the type of LOAD command:
1783          * if it is the first port to be initialized
1784          * common blocks should be initialized, otherwise - not
1785          */
1786         if (!BP_NOMCP(bp)) {
1787                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0);
1788                 if (!load_code) {
1789                         BNX2X_ERR("MCP response failure, aborting\n");
1790                         rc = -EBUSY;
1791                         LOAD_ERROR_EXIT(bp, load_error1);
1792                 }
1793                 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
1794                         rc = -EBUSY; /* other port in diagnostic mode */
1795                         LOAD_ERROR_EXIT(bp, load_error1);
1796                 }
1797
1798         } else {
1799                 int path = BP_PATH(bp);
1800
1801                 DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d]      %d, %d, %d\n",
1802                    path, load_count[path][0], load_count[path][1],
1803                    load_count[path][2]);
1804                 load_count[path][0]++;
1805                 load_count[path][1 + port]++;
1806                 DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d]  %d, %d, %d\n",
1807                    path, load_count[path][0], load_count[path][1],
1808                    load_count[path][2]);
1809                 if (load_count[path][0] == 1)
1810                         load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
1811                 else if (load_count[path][1 + port] == 1)
1812                         load_code = FW_MSG_CODE_DRV_LOAD_PORT;
1813                 else
1814                         load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
1815         }
1816
1817         if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
1818             (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
1819             (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
1820                 bp->port.pmf = 1;
1821                 /*
1822                  * We need the barrier to ensure the ordering between the
1823                  * writing to bp->port.pmf here and reading it from the
1824                  * bnx2x_periodic_task().
1825                  */
1826                 smp_mb();
1827                 queue_delayed_work(bnx2x_wq, &bp->period_task, 0);
1828         } else
1829                 bp->port.pmf = 0;
1830
1831         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
1832
1833         /* Init Function state controlling object */
1834         bnx2x__init_func_obj(bp);
1835
1836         /* Initialize HW */
1837         rc = bnx2x_init_hw(bp, load_code);
1838         if (rc) {
1839                 BNX2X_ERR("HW init failed, aborting\n");
1840                 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
1841                 LOAD_ERROR_EXIT(bp, load_error2);
1842         }
1843
1844         /* Connect to IRQs */
1845         rc = bnx2x_setup_irqs(bp);
1846         if (rc) {
1847                 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
1848                 LOAD_ERROR_EXIT(bp, load_error2);
1849         }
1850
1851         /* Setup NIC internals and enable interrupts */
1852         bnx2x_nic_init(bp, load_code);
1853
1854         /* Init per-function objects */
1855         bnx2x_init_bp_objs(bp);
1856
1857         if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
1858             (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
1859             (bp->common.shmem2_base)) {
1860                 if (SHMEM2_HAS(bp, dcc_support))
1861                         SHMEM2_WR(bp, dcc_support,
1862                                   (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
1863                                    SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
1864         }
1865
1866         bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
1867         rc = bnx2x_func_start(bp);
1868         if (rc) {
1869                 BNX2X_ERR("Function start failed!\n");
1870                 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
1871                 LOAD_ERROR_EXIT(bp, load_error3);
1872         }
1873
1874         /* Send LOAD_DONE command to MCP */
1875         if (!BP_NOMCP(bp)) {
1876                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
1877                 if (!load_code) {
1878                         BNX2X_ERR("MCP response failure, aborting\n");
1879                         rc = -EBUSY;
1880                         LOAD_ERROR_EXIT(bp, load_error3);
1881                 }
1882         }
1883
1884         rc = bnx2x_setup_leading(bp);
1885         if (rc) {
1886                 BNX2X_ERR("Setup leading failed!\n");
1887                 LOAD_ERROR_EXIT(bp, load_error3);
1888         }
1889
1890 #ifdef BCM_CNIC
1891         /* Enable Timer scan */
1892         REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
1893 #endif
1894
1895         for_each_nondefault_queue(bp, i) {
1896                 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
1897                 if (rc)
1898                         LOAD_ERROR_EXIT(bp, load_error4);
1899         }
1900
1901         rc = bnx2x_init_rss_pf(bp);
1902         if (rc)
1903                 LOAD_ERROR_EXIT(bp, load_error4);
1904
1905         /* Now that the clients are configured we are ready to work */
1906         bp->state = BNX2X_STATE_OPEN;
1907
1908         /* Configure a ucast MAC */
1909         rc = bnx2x_set_eth_mac(bp, true);
1910         if (rc)
1911                 LOAD_ERROR_EXIT(bp, load_error4);
1912
1913         if (bp->pending_max) {
1914                 bnx2x_update_max_mf_config(bp, bp->pending_max);
1915                 bp->pending_max = 0;
1916         }
1917
1918         if (bp->port.pmf)
1919                 bnx2x_initial_phy_init(bp, load_mode);
1920
1921         /* Start fast path */
1922
1923         /* Initialize Rx filter. */
1924         netif_addr_lock_bh(bp->dev);
1925         bnx2x_set_rx_mode(bp->dev);
1926         netif_addr_unlock_bh(bp->dev);
1927
1928         /* Start the Tx */
1929         switch (load_mode) {
1930         case LOAD_NORMAL:
1931                 /* Tx queues should only be re-enabled */
1932                 netif_tx_wake_all_queues(bp->dev);
1933                 break;
1934
1935         case LOAD_OPEN:
1936                 netif_tx_start_all_queues(bp->dev);
1937                 smp_mb__after_clear_bit();
1938                 break;
1939
1940         case LOAD_DIAG:
1941                 bp->state = BNX2X_STATE_DIAG;
1942                 break;
1943
1944         default:
1945                 break;
1946         }
1947
1948         if (!bp->port.pmf)
1949                 bnx2x__link_status_update(bp);
1950
1951         /* start the timer */
1952         mod_timer(&bp->timer, jiffies + bp->current_interval);
1953
1954 #ifdef BCM_CNIC
1955         bnx2x_setup_cnic_irq_info(bp);
1956         if (bp->state == BNX2X_STATE_OPEN)
1957                 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
1958 #endif
1959         bnx2x_inc_load_cnt(bp);
1960
1961         /* Wait for all pending SP commands to complete */
1962         if (!bnx2x_wait_sp_comp(bp, ~0x0UL)) {
1963                 BNX2X_ERR("Timeout waiting for SP elements to complete\n");
1964                 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
1965                 return -EBUSY;
1966         }
1967
1968         bnx2x_dcbx_init(bp);
1969         return 0;
1970
1971 #ifndef BNX2X_STOP_ON_ERROR
1972 load_error4:
1973 #ifdef BCM_CNIC
1974         /* Disable Timer scan */
1975         REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
1976 #endif
1977 load_error3:
1978         bnx2x_int_disable_sync(bp, 1);
1979
1980         /* Clean queueable objects */
1981         bnx2x_squeeze_objects(bp);
1982
1983         /* Free SKBs, SGEs, TPA pool and driver internals */
1984         bnx2x_free_skbs(bp);
1985         for_each_rx_queue(bp, i)
1986                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
1987
1988         /* Release IRQs */
1989         bnx2x_free_irq(bp);
1990 load_error2:
1991         if (!BP_NOMCP(bp)) {
1992                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
1993                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
1994         }
1995
1996         bp->port.pmf = 0;
1997 load_error1:
1998         bnx2x_napi_disable(bp);
1999 load_error0:
2000         bnx2x_free_mem(bp);
2001
2002         return rc;
2003 #endif /* ! BNX2X_STOP_ON_ERROR */
2004 }
2005
2006 /* must be called with rtnl_lock */
2007 int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
2008 {
2009         int i;
2010         bool global = false;
2011
2012         if ((bp->state == BNX2X_STATE_CLOSED) ||
2013             (bp->state == BNX2X_STATE_ERROR)) {
2014                 /* We can get here if the driver has been unloaded
2015                  * during parity error recovery and is either waiting for a
2016                  * leader to complete or for other functions to unload and
2017                  * then ifdown has been issued. In this case we want to
2018                  * unload and let other functions complete the recovery
2019                  * process.
2020                  */
2021                 bp->recovery_state = BNX2X_RECOVERY_DONE;
2022                 bp->is_leader = 0;
2023                 bnx2x_release_leader_lock(bp);
2024                 smp_mb();
2025
2026                 DP(NETIF_MSG_HW, "Releasing a leadership...\n");
2027
2028                 return -EINVAL;
2029         }
2030
2031         /*
2032          * It's important to set bp->state to a value different from
2033          * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
2034          * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
2035          */
2036         bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
2037         smp_mb();
2038
2039         /* Stop Tx */
2040         bnx2x_tx_disable(bp);
2041
2042 #ifdef BCM_CNIC
2043         bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
2044 #endif
2045
2046         bp->rx_mode = BNX2X_RX_MODE_NONE;
2047
2048         del_timer_sync(&bp->timer);
2049
2050         /* Set ALWAYS_ALIVE bit in shmem */
2051         bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
2052
2053         bnx2x_drv_pulse(bp);
2054
2055         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2056
2057         /* Cleanup the chip if needed */
2058         if (unload_mode != UNLOAD_RECOVERY)
2059                 bnx2x_chip_cleanup(bp, unload_mode);
2060         else {
2061                 /* Send the UNLOAD_REQUEST to the MCP */
2062                 bnx2x_send_unload_req(bp, unload_mode);
2063
2064                 /*
2065                  * Prevent transactions to the host from the functions on the
2066                  * engine that doesn't reset global blocks in case of global
2067                  * attention once global blocks are reset and gates are opened
2068                  * (the engine whose leader will perform the recovery
2069                  * last).
2070                  */
2071                 if (!CHIP_IS_E1x(bp))
2072                         bnx2x_pf_disable(bp);
2073
2074                 /* Disable HW interrupts, NAPI */
2075                 bnx2x_netif_stop(bp, 1);
2076
2077                 /* Release IRQs */
2078                 bnx2x_free_irq(bp);
2079
2080                 /* Report UNLOAD_DONE to MCP */
2081                 bnx2x_send_unload_done(bp);
2082         }
2083
2084         /*
2085          * At this stage no more interrupts will arrive, so we may safely clean
2086          * the queueable objects here in case they failed to get cleaned so far.
2087          */
2088         bnx2x_squeeze_objects(bp);
2089
2090         /* There should be no more pending SP commands at this stage */
2091         bp->sp_state = 0;
2092
2093         bp->port.pmf = 0;
2094
2095         /* Free SKBs, SGEs, TPA pool and driver internals */
2096         bnx2x_free_skbs(bp);
2097         for_each_rx_queue(bp, i)
2098                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2099
2100         bnx2x_free_mem(bp);
2101
2102         bp->state = BNX2X_STATE_CLOSED;
2103
2104         /* Check if there are pending parity attentions. If there are - set
2105          * RECOVERY_IN_PROGRESS.
2106          */
2107         if (bnx2x_chk_parity_attn(bp, &global, false)) {
2108                 bnx2x_set_reset_in_progress(bp);
2109
2110                 /* Set RESET_IS_GLOBAL if needed */
2111                 if (global)
2112                         bnx2x_set_reset_global(bp);
2113         }
2114
2115
2116         /* The last driver must disable the "close the gate" functionality if
2117          * there is no parity attention or "process kill" pending.
2118          */
2119         if (!bnx2x_dec_load_cnt(bp) && bnx2x_reset_is_done(bp, BP_PATH(bp)))
2120                 bnx2x_disable_close_the_gate(bp);
2121
2122         return 0;
2123 }
2124
2125 int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
2126 {
2127         u16 pmcsr;
2128
2129         /* If there is no power capability, silently succeed */
2130         if (!bp->pm_cap) {
2131                 DP(NETIF_MSG_HW, "No power capability. Breaking.\n");
2132                 return 0;
2133         }
2134
2135         pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
2136
2137         switch (state) {
2138         case PCI_D0:
2139                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2140                                       ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
2141                                        PCI_PM_CTRL_PME_STATUS));
2142
2143                 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
2144                         /* delay required during transition out of D3hot */
2145                         msleep(20);
2146                 break;
2147
2148         case PCI_D3hot:
2149                 /* If there are other clients above, don't
2150                    shut down the power */
2151                 if (atomic_read(&bp->pdev->enable_cnt) != 1)
2152                         return 0;
2153                 /* Don't shut down the power for emulation and FPGA */
2154                 if (CHIP_REV_IS_SLOW(bp))
2155                         return 0;
2156
2157                 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
2158                 pmcsr |= 3;
2159
2160                 if (bp->wol)
2161                         pmcsr |= PCI_PM_CTRL_PME_ENABLE;
2162
2163                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2164                                       pmcsr);
2165
2166                 /* No more memory access after this point until
2167                  * device is brought back to D0.
2168                  */
2169                 break;
2170
2171         default:
2172                 return -EINVAL;
2173         }
2174         return 0;
2175 }
2176
2177 /*
2178  * net_device service functions
2179  */
2180 int bnx2x_poll(struct napi_struct *napi, int budget)
2181 {
2182         int work_done = 0;
2183         u8 cos;
2184         struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
2185                                                  napi);
2186         struct bnx2x *bp = fp->bp;
2187
2188         while (1) {
2189 #ifdef BNX2X_STOP_ON_ERROR
2190                 if (unlikely(bp->panic)) {
2191                         napi_complete(napi);
2192                         return 0;
2193                 }
2194 #endif
2195
2196                 for_each_cos_in_tx_queue(fp, cos)
2197                         if (bnx2x_tx_queue_has_work(&fp->txdata[cos]))
2198                                 bnx2x_tx_int(bp, &fp->txdata[cos]);
2199
2200
2201                 if (bnx2x_has_rx_work(fp)) {
2202                         work_done += bnx2x_rx_int(fp, budget - work_done);
2203
2204                         /* must not complete if we consumed full budget */
2205                         if (work_done >= budget)
2206                                 break;
2207                 }
2208
2209                 /* Fall out from the NAPI loop if needed */
2210                 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
2211 #ifdef BCM_CNIC
2212                         /* No need to update SB for FCoE L2 ring as long as
2213                          * it's connected to the default SB and the SB
2214                          * has been updated when NAPI was scheduled.
2215                          */
2216                         if (IS_FCOE_FP(fp)) {
2217                                 napi_complete(napi);
2218                                 break;
2219                         }
2220 #endif
2221
2222                         bnx2x_update_fpsb_idx(fp);
2223                         /* bnx2x_has_rx_work() reads the status block,
2224                          * thus we need to ensure that status block indices
2225                          * have been actually read (bnx2x_update_fpsb_idx)
2226                          * prior to this check (bnx2x_has_rx_work) so that
2227                          * we won't write the "newer" value of the status block
2228                          * to IGU (if there was a DMA right after
2229                          * bnx2x_has_rx_work and if there is no rmb, the memory
2230                          * reading (bnx2x_update_fpsb_idx) may be postponed
2231                          * to right before bnx2x_ack_sb). In this case there
2232                          * will never be another interrupt until there is
2233                          * another update of the status block, while there
2234                          * is still unhandled work.
2235                          */
2236                         rmb();
2237
2238                         if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
2239                                 napi_complete(napi);
2240                                 /* Re-enable interrupts */
2241                                 DP(NETIF_MSG_HW,
2242                                    "Update index to %d\n", fp->fp_hc_idx);
2243                                 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
2244                                              le16_to_cpu(fp->fp_hc_idx),
2245                                              IGU_INT_ENABLE, 1);
2246                                 break;
2247                         }
2248                 }
2249         }
2250
2251         return work_done;
2252 }
2253
2254 /* we split the first BD into headers and data BDs
2255  * to ease the pain of our fellow microcode engineers
2256  * we use one mapping for both BDs
2257  * So far this has only been observed to happen
2258  * in Other Operating Systems(TM)
2259  */
2260 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
2261                                    struct bnx2x_fp_txdata *txdata,
2262                                    struct sw_tx_bd *tx_buf,
2263                                    struct eth_tx_start_bd **tx_bd, u16 hlen,
2264                                    u16 bd_prod, int nbd)
2265 {
2266         struct eth_tx_start_bd *h_tx_bd = *tx_bd;
2267         struct eth_tx_bd *d_tx_bd;
2268         dma_addr_t mapping;
2269         int old_len = le16_to_cpu(h_tx_bd->nbytes);
2270
2271         /* first fix first BD */
2272         h_tx_bd->nbd = cpu_to_le16(nbd);
2273         h_tx_bd->nbytes = cpu_to_le16(hlen);
2274
2275         DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
2276            "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
2277            h_tx_bd->addr_lo, h_tx_bd->nbd);
2278
2279         /* now get a new data BD
2280          * (after the pbd) and fill it */
2281         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2282         d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
2283
2284         mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
2285                            le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
2286
2287         d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2288         d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
2289         d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
2290
2291         /* this marks the BD as one that has no individual mapping */
2292         tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
2293
2294         DP(NETIF_MSG_TX_QUEUED,
2295            "TSO split data size is %d (%x:%x)\n",
2296            d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
2297
2298         /* update tx_bd */
2299         *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
2300
2301         return bd_prod;
2302 }
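/*
 * A worked example of the split above (hypothetical sizes): if the linear
 * part of a TSO skb holds 200 bytes of which hlen = 66 are headers, the
 * start BD is trimmed to nbytes = 66 and a new data BD is pointed at the
 * same DMA mapping at offset 66 with nbytes = 200 - 66 = 134; the caller
 * passes ++nbd so that first_bd->nbd accounts for the extra BD.
 */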
2303
2304 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
2305 {
2306         if (fix > 0)
2307                 csum = (u16) ~csum_fold(csum_sub(csum,
2308                                 csum_partial(t_header - fix, fix, 0)));
2309
2310         else if (fix < 0)
2311                 csum = (u16) ~csum_fold(csum_add(csum,
2312                                 csum_partial(t_header, -fix, 0)));
2313
2314         return swab16(csum);
2315 }
2316
2317 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
2318 {
2319         u32 rc;
2320
2321         if (skb->ip_summed != CHECKSUM_PARTIAL)
2322                 rc = XMIT_PLAIN;
2323
2324         else {
2325                 if (vlan_get_protocol(skb) == htons(ETH_P_IPV6)) {
2326                         rc = XMIT_CSUM_V6;
2327                         if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
2328                                 rc |= XMIT_CSUM_TCP;
2329
2330                 } else {
2331                         rc = XMIT_CSUM_V4;
2332                         if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2333                                 rc |= XMIT_CSUM_TCP;
2334                 }
2335         }
2336
2337         if (skb_is_gso_v6(skb))
2338                 rc |= XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6;
2339         else if (skb_is_gso(skb))
2340                 rc |= XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP;
2341
2342         return rc;
2343 }
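/*
 * Example flag combinations produced above: a CHECKSUM_PARTIAL IPv4/TCP skb
 * yields XMIT_CSUM_V4 | XMIT_CSUM_TCP, plus XMIT_GSO_V4 if it is also a GSO
 * skb; an IPv6/TCP GSO skb yields XMIT_CSUM_V6 | XMIT_CSUM_TCP | XMIT_GSO_V6;
 * an skb with no checksum offload requested is simply XMIT_PLAIN.
 */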
2344
2345 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
2346 /* check if the packet requires linearization (packet is too fragmented);
2347    no need to check fragmentation if page size > 8K (there will be no
2348    violation of FW restrictions) */
2349 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
2350                              u32 xmit_type)
2351 {
2352         int to_copy = 0;
2353         int hlen = 0;
2354         int first_bd_sz = 0;
2355
2356         /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
2357         if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
2358
2359                 if (xmit_type & XMIT_GSO) {
2360                         unsigned short lso_mss = skb_shinfo(skb)->gso_size;
2361                         /* Check if LSO packet needs to be copied:
2362                            3 = 1 (for headers BD) + 2 (for PBD and last BD) */
2363                         int wnd_size = MAX_FETCH_BD - 3;
2364                         /* Number of windows to check */
2365                         int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
2366                         int wnd_idx = 0;
2367                         int frag_idx = 0;
2368                         u32 wnd_sum = 0;
2369
2370                         /* Headers length */
2371                         hlen = (int)(skb_transport_header(skb) - skb->data) +
2372                                 tcp_hdrlen(skb);
2373
2374                         /* Amount of data (w/o headers) on the linear part of the SKB */
2375                         first_bd_sz = skb_headlen(skb) - hlen;
2376
2377                         wnd_sum  = first_bd_sz;
2378
2379                         /* Calculate the first sum - it's special */
2380                         for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
2381                                 wnd_sum +=
2382                                         skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);
2383
2384                         /* If there was data in the linear part of the skb - check it */
2385                         if (first_bd_sz > 0) {
2386                                 if (unlikely(wnd_sum < lso_mss)) {
2387                                         to_copy = 1;
2388                                         goto exit_lbl;
2389                                 }
2390
2391                                 wnd_sum -= first_bd_sz;
2392                         }
2393
2394                         /* Others are easier: run through the frag list and
2395                            check all windows */
2396                         for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
2397                                 wnd_sum +=
2398                           skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);
2399
2400                                 if (unlikely(wnd_sum < lso_mss)) {
2401                                         to_copy = 1;
2402                                         break;
2403                                 }
2404                                 wnd_sum -=
2405                                         skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
2406                         }
2407                 } else {
2408                         /* in the non-LSO case a too-fragmented packet should
2409                            always be linearized */
2410                         to_copy = 1;
2411                 }
2412         }
2413
2414 exit_lbl:
2415         if (unlikely(to_copy))
2416                 DP(NETIF_MSG_TX_QUEUED,
2417                    "Linearization IS REQUIRED for %s packet. "
2418                    "num_frags %d  hlen %d  first_bd_sz %d\n",
2419                    (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
2420                    skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
2421
2422         return to_copy;
2423 }
2424 #endif
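/*
 * The window check above enforces that every run of wnd_size consecutive
 * BDs (linear part plus frags) carries at least gso_size bytes, so the FW
 * never needs more than MAX_FETCH_BD BDs to build one MSS-sized frame.
 * Hypothetical numbers: if MAX_FETCH_BD were 13 then wnd_size = 10, and an
 * skb with lso_mss = 1400 whose 10-frag window sums to only 1200 bytes
 * would set to_copy = 1 and be linearized by the caller.
 */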
2425
2426 static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
2427                                         u32 xmit_type)
2428 {
2429         *parsing_data |= (skb_shinfo(skb)->gso_size <<
2430                               ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
2431                               ETH_TX_PARSE_BD_E2_LSO_MSS;
2432         if ((xmit_type & XMIT_GSO_V6) &&
2433             (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
2434                 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
2435 }
2436
2437 /**
2438  * bnx2x_set_pbd_gso - update PBD in GSO case.
2439  *
2440  * @skb:        packet skb
2441  * @pbd:        parse BD
2442  * @xmit_type:  xmit flags
2443  */
2444 static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
2445                                      struct eth_tx_parse_bd_e1x *pbd,
2446                                      u32 xmit_type)
2447 {
2448         pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2449         pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
2450         pbd->tcp_flags = pbd_tcp_flags(skb);
2451
2452         if (xmit_type & XMIT_GSO_V4) {
2453                 pbd->ip_id = swab16(ip_hdr(skb)->id);
2454                 pbd->tcp_pseudo_csum =
2455                         swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
2456                                                   ip_hdr(skb)->daddr,
2457                                                   0, IPPROTO_TCP, 0));
2458
2459         } else
2460                 pbd->tcp_pseudo_csum =
2461                         swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2462                                                 &ipv6_hdr(skb)->daddr,
2463                                                 0, IPPROTO_TCP, 0));
2464
2465         pbd->global_data |= ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN;
2466 }
2467
2468 /**
2469  * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
2470  *
2471  * @bp:                 driver handle
2472  * @skb:                packet skb
2473  * @parsing_data:       data to be updated
2474  * @xmit_type:          xmit flags
2475  *
2476  * 57712 related
2477  */
2478 static inline  u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
2479         u32 *parsing_data, u32 xmit_type)
2480 {
2481         *parsing_data |=
2482                         ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
2483                         ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) &
2484                         ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W;
2485
2486         if (xmit_type & XMIT_CSUM_TCP) {
2487                 *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
2488                         ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
2489                         ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
2490
2491                 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
2492         } else
2493                 /* We support checksum offload for TCP and UDP only.
2494                  * No need to pass the UDP header length - it's a constant.
2495                  */
2496                 return skb_transport_header(skb) +
2497                                 sizeof(struct udphdr) - skb->data;
2498 }
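/*
 * Units used above: the TCP header start offset is stored in 16-bit words
 * (>> 1) and the TCP header length in dwords (/ 4), while the returned
 * value is a plain byte count. For a hypothetical untagged IPv4/TCP frame
 * with no options, the transport header starts at byte 34 (17 words), the
 * TCP header is 20 bytes (5 dwords) and the function returns 34 + 20 = 54.
 */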
2499
2500 static inline void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
2501         struct eth_tx_start_bd *tx_start_bd, u32 xmit_type)
2502 {
2503         tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
2504
2505         if (xmit_type & XMIT_CSUM_V4)
2506                 tx_start_bd->bd_flags.as_bitfield |=
2507                                         ETH_TX_BD_FLAGS_IP_CSUM;
2508         else
2509                 tx_start_bd->bd_flags.as_bitfield |=
2510                                         ETH_TX_BD_FLAGS_IPV6;
2511
2512         if (!(xmit_type & XMIT_CSUM_TCP))
2513                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
2514 }
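/*
 * Resulting start BD flags, read straight from the logic above: IPv4/TCP
 * gets L4_CSUM | IP_CSUM, IPv6/TCP gets L4_CSUM | IPV6, and the UDP
 * variants additionally get IS_UDP since XMIT_CSUM_TCP is not set.
 */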
2515
2516 /**
2517  * bnx2x_set_pbd_csum - update PBD with checksum and return header length
2518  *
2519  * @bp:         driver handle
2520  * @skb:        packet skb
2521  * @pbd:        parse BD to be updated
2522  * @xmit_type:  xmit flags
2523  */
2524 static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
2525         struct eth_tx_parse_bd_e1x *pbd,
2526         u32 xmit_type)
2527 {
2528         u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
2529
2530         /* for now NS flag is not used in Linux */
2531         pbd->global_data =
2532                 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
2533                          ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
2534
2535         pbd->ip_hlen_w = (skb_transport_header(skb) -
2536                         skb_network_header(skb)) >> 1;
2537
2538         hlen += pbd->ip_hlen_w;
2539
2540         /* We support checksum offload for TCP and UDP only */
2541         if (xmit_type & XMIT_CSUM_TCP)
2542                 hlen += tcp_hdrlen(skb) / 2;
2543         else
2544                 hlen += sizeof(struct udphdr) / 2;
2545
2546         pbd->total_hlen_w = cpu_to_le16(hlen);
2547         hlen = hlen*2;
2548
2549         if (xmit_type & XMIT_CSUM_TCP) {
2550                 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
2551
2552         } else {
2553                 s8 fix = SKB_CS_OFF(skb); /* signed! */
2554
2555                 DP(NETIF_MSG_TX_QUEUED,
2556                    "hlen %d  fix %d  csum before fix %x\n",
2557                    le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
2558
2559                 /* HW bug: fixup the CSUM */
2560                 pbd->tcp_pseudo_csum =
2561                         bnx2x_csum_fix(skb_transport_header(skb),
2562                                        SKB_CS(skb), fix);
2563
2564                 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
2565                    pbd->tcp_pseudo_csum);
2566         }
2567
2568         return hlen;
2569 }
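/*
 * Header accounting above is in 16-bit words: for a hypothetical untagged
 * IPv4/TCP frame with no options, hlen starts at 7 (14-byte Ethernet
 * header), ip_hlen_w adds 10 (20-byte IP header) and the TCP header adds
 * another 10, giving total_hlen_w = 27; the function then returns
 * hlen * 2 = 54 bytes.
 */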
2570
2571 /* called with netif_tx_lock
2572  * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
2573  * netif_wake_queue()
2574  */
2575 netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
2576 {
2577         struct bnx2x *bp = netdev_priv(dev);
2578
2579         struct bnx2x_fastpath *fp;
2580         struct netdev_queue *txq;
2581         struct bnx2x_fp_txdata *txdata;
2582         struct sw_tx_bd *tx_buf;
2583         struct eth_tx_start_bd *tx_start_bd, *first_bd;
2584         struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
2585         struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
2586         struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
2587         u32 pbd_e2_parsing_data = 0;
2588         u16 pkt_prod, bd_prod;
2589         int nbd, txq_index, fp_index, txdata_index;
2590         dma_addr_t mapping;
2591         u32 xmit_type = bnx2x_xmit_type(bp, skb);
2592         int i;
2593         u8 hlen = 0;
2594         __le16 pkt_size = 0;
2595         struct ethhdr *eth;
2596         u8 mac_type = UNICAST_ADDRESS;
2597
2598 #ifdef BNX2X_STOP_ON_ERROR
2599         if (unlikely(bp->panic))
2600                 return NETDEV_TX_BUSY;
2601 #endif
2602
2603         txq_index = skb_get_queue_mapping(skb);
2604         txq = netdev_get_tx_queue(dev, txq_index);
2605
2606         BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + FCOE_PRESENT);
2607
2608         /* decode the fastpath index and the cos index from the txq */
2609         fp_index = TXQ_TO_FP(txq_index);
2610         txdata_index = TXQ_TO_COS(txq_index);
2611
2612 #ifdef BCM_CNIC
2613         /*
2614          * Override the above for the FCoE queue:
2615          *   - FCoE fp entry is right after the ETH entries.
2616          *   - FCoE L2 queue uses bp->txdata[0] only.
2617          */
2618         if (unlikely(!NO_FCOE(bp) && (txq_index ==
2619                                       bnx2x_fcoe_tx(bp, txq_index)))) {
2620                 fp_index = FCOE_IDX;
2621                 txdata_index = 0;
2622         }
2623 #endif
2624
2625         /* enable this debug print to view the transmission queue being used
2626         DP(BNX2X_MSG_FP, "indices: txq %d, fp %d, txdata %d\n",
2627            txq_index, fp_index, txdata_index); */
2628
2629         /* locate the fastpath and the txdata */
2630         fp = &bp->fp[fp_index];
2631         txdata = &fp->txdata[txdata_index];
2632
2633         /* enable this debug print to view the transmission details
2634         DP(BNX2X_MSG_FP,"transmitting packet cid %d fp index %d txdata_index %d"
2635                         " tx_data ptr %p fp pointer %p\n",
2636            txdata->cid, fp_index, txdata_index, txdata, fp); */
2637
2638         if (unlikely(bnx2x_tx_avail(bp, txdata) <
2639                      (skb_shinfo(skb)->nr_frags + 3))) {
2640                 fp->eth_q_stats.driver_xoff++;
2641                 netif_tx_stop_queue(txq);
2642                 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
2643                 return NETDEV_TX_BUSY;
2644         }
2645
2646         DP(NETIF_MSG_TX_QUEUED, "queue[%d]: SKB: summed %x  protocol %x  "
2647                                 "protocol(%x,%x) gso type %x  xmit_type %x\n",
2648            txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
2649            ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
2650
2651         eth = (struct ethhdr *)skb->data;
2652
2653         /* set flag according to packet type (UNICAST_ADDRESS is default) */
2654         if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
2655                 if (is_broadcast_ether_addr(eth->h_dest))
2656                         mac_type = BROADCAST_ADDRESS;
2657                 else
2658                         mac_type = MULTICAST_ADDRESS;
2659         }
2660
2661 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
2662         /* First, check if we need to linearize the skb (due to FW
2663            restrictions). No need to check fragmentation if page size > 8K
2664            (there will be no violation of FW restrictions) */
2665         if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
2666                 /* Statistics of linearization */
2667                 bp->lin_cnt++;
2668                 if (skb_linearize(skb) != 0) {
2669                         DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
2670                            "silently dropping this SKB\n");
2671                         dev_kfree_skb_any(skb);
2672                         return NETDEV_TX_OK;
2673                 }
2674         }
2675 #endif
2676         /* Map skb linear data for DMA */
2677         mapping = dma_map_single(&bp->pdev->dev, skb->data,
2678                                  skb_headlen(skb), DMA_TO_DEVICE);
2679         if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
2680                 DP(NETIF_MSG_TX_QUEUED, "SKB mapping failed - "
2681                    "silently dropping this SKB\n");
2682                 dev_kfree_skb_any(skb);
2683                 return NETDEV_TX_OK;
2684         }
2685         /*
2686          * Please read carefully. First we use one BD which we mark as start,
2687          * then we have a parsing info BD (used for TSO or checksum offload),
2688          * and only then we have the rest of the TSO BDs.
2689          * (don't forget to mark the last one as last,
2690          * and to unmap only AFTER you write to the BD ...)
2691          * And above all, all PBD sizes are in words - NOT DWORDS!
2692          */
2693
2694         /* get the current packet producer now - advance it just before sending the
2695          * packet, since mapping of pages may fail and cause the packet to be dropped
2696          */
2697         pkt_prod = txdata->tx_pkt_prod;
2698         bd_prod = TX_BD(txdata->tx_bd_prod);
2699
2700         /* get a tx_buf and first BD
2701          * tx_start_bd may be changed during SPLIT,
2702          * but first_bd will always stay first
2703          */
2704         tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
2705         tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
2706         first_bd = tx_start_bd;
2707
2708         tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
2709         SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_ETH_ADDR_TYPE,
2710                  mac_type);
2711
2712         /* header nbd */
2713         SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1);
2714
2715         /* remember the first BD of the packet */
2716         tx_buf->first_bd = txdata->tx_bd_prod;
2717         tx_buf->skb = skb;
2718         tx_buf->flags = 0;
2719
2720         DP(NETIF_MSG_TX_QUEUED,
2721            "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
2722            pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
2723
2724         if (vlan_tx_tag_present(skb)) {
2725                 tx_start_bd->vlan_or_ethertype =
2726                     cpu_to_le16(vlan_tx_tag_get(skb));
2727                 tx_start_bd->bd_flags.as_bitfield |=
2728                     (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
2729         } else
2730                 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
2731
2732         /* turn on parsing and get a BD */
2733         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2734
2735         if (xmit_type & XMIT_CSUM)
2736                 bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
2737
2738         if (!CHIP_IS_E1x(bp)) {
2739                 pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
2740                 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
2741                 /* Set PBD in checksum offload case */
2742                 if (xmit_type & XMIT_CSUM)
2743                         hlen = bnx2x_set_pbd_csum_e2(bp, skb,
2744                                                      &pbd_e2_parsing_data,
2745                                                      xmit_type);
2746                 if (IS_MF_SI(bp)) {
2747                         /*
2748                          * fill in the MAC addresses in the PBD - for local
2749                          * switching
2750                          */
2751                         bnx2x_set_fw_mac_addr(&pbd_e2->src_mac_addr_hi,
2752                                               &pbd_e2->src_mac_addr_mid,
2753                                               &pbd_e2->src_mac_addr_lo,
2754                                               eth->h_source);
2755                         bnx2x_set_fw_mac_addr(&pbd_e2->dst_mac_addr_hi,
2756                                               &pbd_e2->dst_mac_addr_mid,
2757                                               &pbd_e2->dst_mac_addr_lo,
2758                                               eth->h_dest);
2759                 }
2760         } else {
2761                 pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
2762                 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
2763                 /* Set PBD in checksum offload case */
2764                 if (xmit_type & XMIT_CSUM)
2765                         hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
2766
2767         }
2768
2769         /* Setup the data pointer of the first BD of the packet */
2770         tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2771         tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
2772         nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
2773         tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
2774         pkt_size = tx_start_bd->nbytes;
2775
2776         DP(NETIF_MSG_TX_QUEUED, "first bd @%p  addr (%x:%x)  nbd %d"
2777            "  nbytes %d  flags %x  vlan %x\n",
2778            tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
2779            le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
2780            tx_start_bd->bd_flags.as_bitfield,
2781            le16_to_cpu(tx_start_bd->vlan_or_ethertype));
2782
2783         if (xmit_type & XMIT_GSO) {
2784
2785                 DP(NETIF_MSG_TX_QUEUED,
2786                    "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
2787                    skb->len, hlen, skb_headlen(skb),
2788                    skb_shinfo(skb)->gso_size);
2789
2790                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
2791
2792                 if (unlikely(skb_headlen(skb) > hlen))
2793                         bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
2794                                                  &tx_start_bd, hlen,
2795                                                  bd_prod, ++nbd);
2796                 if (!CHIP_IS_E1x(bp))
2797                         bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
2798                                              xmit_type);
2799                 else
2800                         bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
2801         }
2802
2803         /* Set the PBD's parsing_data field if not zero
2804          * (for the chips newer than 57711).
2805          */
2806         if (pbd_e2_parsing_data)
2807                 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
2808
2809         tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
2810
2811         /* Handle fragmented skb */
2812         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2813                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2814
2815                 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
2816                                            skb_frag_size(frag), DMA_TO_DEVICE);
2817                 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
2818
2819                         DP(NETIF_MSG_TX_QUEUED, "Unable to map page - "
2820                                                 "dropping packet...\n");
2821
2822                         /* we need to unmap all buffers already mapped
2823                          * for this SKB;
2824                          * first_bd->nbd needs to be properly updated
2825                          * before the call to bnx2x_free_tx_pkt
2826                          */
2827                         first_bd->nbd = cpu_to_le16(nbd);
2828                         bnx2x_free_tx_pkt(bp, txdata,
2829                                           TX_BD(txdata->tx_pkt_prod));
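                        /* bnx2x_free_tx_pkt() has freed the skb, so report
                         * the packet as consumed
                         */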
2830                         return NETDEV_TX_OK;
2831                 }
2832
2833                 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2834                 tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
2835                 if (total_pkt_bd == NULL)
2836                         total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
2837
2838                 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2839                 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
2840                 tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
2841                 le16_add_cpu(&pkt_size, skb_frag_size(frag));
2842                 nbd++;
2843
2844                 DP(NETIF_MSG_TX_QUEUED,
2845                    "frag %d  bd @%p  addr (%x:%x)  nbytes %d\n",
2846                    i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
2847                    le16_to_cpu(tx_data_bd->nbytes));
2848         }
2849
2850         DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
2851
2852         /* update with actual num BDs */
2853         first_bd->nbd = cpu_to_le16(nbd);
2854
2855         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2856
2857         /* now send a tx doorbell, counting the 'next page' BD
2858          * if the packet contains it or ends right before it
2859          */
2860         if (TX_BD_POFF(bd_prod) < nbd)
2861                 nbd++;
2862
2863         /* total_pkt_bytes should be set on the first data BD if
2864          * it's not an LSO packet and there is more than one
2865          * data BD. In this case pkt_size is limited by the MTU value.
2866          * However we prefer to set it for an LSO packet as well (while we
2867          * don't have to) in order to save some CPU cycles in the non-LSO
2868          * case, which we care much more about.
2869          */
2870         if (total_pkt_bd != NULL)
2871                 total_pkt_bd->total_pkt_bytes = pkt_size;
2872
2873         if (pbd_e1x)
2874                 DP(NETIF_MSG_TX_QUEUED,
2875                    "PBD (E1X) @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u"
2876                    "  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
2877                    pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
2878                    pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
2879                    pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
2880                     le16_to_cpu(pbd_e1x->total_hlen_w));
2881         if (pbd_e2)
2882                 DP(NETIF_MSG_TX_QUEUED,
2883                    "PBD (E2) @%p  dst %x %x %x src %x %x %x parsing_data %x\n",
2884                    pbd_e2, pbd_e2->dst_mac_addr_hi, pbd_e2->dst_mac_addr_mid,
2885                    pbd_e2->dst_mac_addr_lo, pbd_e2->src_mac_addr_hi,
2886                    pbd_e2->src_mac_addr_mid, pbd_e2->src_mac_addr_lo,
2887                    pbd_e2->parsing_data);
2888         DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);
2889
2890         txdata->tx_pkt_prod++;
2891         /*
2892          * Make sure that the BD data is updated before updating the producer
2893          * since the FW might read the BD right after the producer is updated.
2894          * This is only applicable to weak-ordered memory model archs such
2895          * as IA-64. The following barrier is also mandatory since the FW
2896          * assumes packets must have BDs.
2897          */
2898         wmb();
2899
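        /* txdata->tx_db.data.prod and txdata->tx_db.raw overlay the same
         * doorbell word, so the barrier() below keeps the compiler from
         * reordering the producer update past the doorbell write
         */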
2900         txdata->tx_db.data.prod += nbd;
2901         barrier();
2902
2903         DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
2904
2905         mmiowb();
2906
2907         txdata->tx_bd_prod += nbd;
2908
2909         if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_SKB_FRAGS + 3)) {
2910                 netif_tx_stop_queue(txq);
2911
2912                 /* the paired memory barrier is in bnx2x_tx_int(); we have to
2913                  * keep the ordering of the set_bit() in netif_tx_stop_queue()
2914                  * and the read of txdata->tx_bd_cons */
2915                 smp_mb();
2916
2917                 fp->eth_q_stats.driver_xoff++;
2918                 if (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 3)
2919                         netif_tx_wake_queue(txq);
2920         }
2921         txdata->tx_pkt++;
2922
2923         return NETDEV_TX_OK;
2924 }
2925
2926 /**
2927  * bnx2x_setup_tc - routine to configure net_device for multi tc
2928  *
2929  * @dev:        net device to configure
2930  * @num_tc:     number of traffic classes to enable
2931  *
2932  * callback connected to the ndo_setup_tc function pointer
2933  */
2934 int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
2935 {
2936         int cos, prio, count, offset;
2937         struct bnx2x *bp = netdev_priv(dev);
2938
2939         /* setup tc must be called under rtnl lock */
2940         ASSERT_RTNL();
2941
2942         /* no traffic classes requested - clear the tc configuration */
2943         if (!num_tc) {
2944                 netdev_reset_tc(dev);
2945                 return 0;
2946         }
2947
2948         /* requested to support too many traffic classes */
2949         if (num_tc > bp->max_cos) {
2950                 DP(NETIF_MSG_TX_ERR, "support for too many traffic classes"
2951                                      " requested: %d. max supported is %d\n",
2952                                      num_tc, bp->max_cos);
2953                 return -EINVAL;
2954         }
2955
2956         /* declare amount of supported traffic classes */
2957         if (netdev_set_num_tc(dev, num_tc)) {
2958                 DP(NETIF_MSG_TX_ERR, "failed to declare %d traffic classes\n",
2959                                      num_tc);
2960                 return -EINVAL;
2961         }
2962
2963         /* configure priority to traffic class mapping */
2964         for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
2965                 netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]);
2966                 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n",
2967                    prio, bp->prio_to_cos[prio]);
2968         }
2969
2970
2971         /* Use this configuration to differentiate tc0 from other COSes.
2972            This can be used for ETS or PFC, and saves the effort of setting
2973            up a multi-class queueing discipline or negotiating DCBX with a switch
2974         netdev_set_prio_tc_map(dev, 0, 0);
2975         DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0);
2976         for (prio = 1; prio < 16; prio++) {
2977                 netdev_set_prio_tc_map(dev, prio, 1);
2978                 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1);
2979         } */
2980
2981         /* configure traffic class to transmission queue mapping */
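        /* each class owns a contiguous block of MAX_TXQS_PER_COS queues,
         * of which the first BNX2X_NUM_ETH_QUEUES(bp) are actually used
         */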
2982         for (cos = 0; cos < bp->max_cos; cos++) {
2983                 count = BNX2X_NUM_ETH_QUEUES(bp);
2984                 offset = cos * MAX_TXQS_PER_COS;
2985                 netdev_set_tc_queue(dev, cos, count, offset);
2986                 DP(BNX2X_MSG_SP, "mapping tc %d to offset %d count %d\n",
2987                    cos, offset, count);
2988         }
2989
2990         return 0;
2991 }
2992
2993 /* called with rtnl_lock */
2994 int bnx2x_change_mac_addr(struct net_device *dev, void *p)
2995 {
2996         struct sockaddr *addr = p;
2997         struct bnx2x *bp = netdev_priv(dev);
2998         int rc = 0;
2999
3000         if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
3001                 return -EINVAL;
3002
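        /* if the device is running, remove the previously configured MAC
         * from the HW before programming the new one below
         */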
3003         if (netif_running(dev)) {
3004                 rc = bnx2x_set_eth_mac(bp, false);
3005                 if (rc)
3006                         return rc;
3007         }
3008
3009         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
3010
3011         if (netif_running(dev))
3012                 rc = bnx2x_set_eth_mac(bp, true);
3013
3014         return rc;
3015 }
3016
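/* free the status block, Rx rings and Tx rings of a single fastpath queue */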
3017 static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
3018 {
3019         union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
3020         struct bnx2x_fastpath *fp = &bp->fp[fp_index];
3021         u8 cos;
3022
3023         /* Common */
3024 #ifdef BCM_CNIC
3025         if (IS_FCOE_IDX(fp_index)) {
3026                 memset(sb, 0, sizeof(union host_hc_status_block));
3027                 fp->status_blk_mapping = 0;
3028
3029         } else {
3030 #endif
3031                 /* status blocks */
3032                 if (!CHIP_IS_E1x(bp))
3033                         BNX2X_PCI_FREE(sb->e2_sb,
3034                                        bnx2x_fp(bp, fp_index,
3035                                                 status_blk_mapping),
3036                                        sizeof(struct host_hc_status_block_e2));
3037                 else
3038                         BNX2X_PCI_FREE(sb->e1x_sb,
3039                                        bnx2x_fp(bp, fp_index,
3040                                                 status_blk_mapping),
3041                                        sizeof(struct host_hc_status_block_e1x));
3042 #ifdef BCM_CNIC
3043         }
3044 #endif
3045         /* Rx */
3046         if (!skip_rx_queue(bp, fp_index)) {
3047                 bnx2x_free_rx_bds(fp);
3048
3049                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
3050                 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
3051                 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
3052                                bnx2x_fp(bp, fp_index, rx_desc_mapping),
3053                                sizeof(struct eth_rx_bd) * NUM_RX_BD);
3054
3055                 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
3056                                bnx2x_fp(bp, fp_index, rx_comp_mapping),
3057                                sizeof(struct eth_fast_path_rx_cqe) *
3058                                NUM_RCQ_BD);
3059
3060                 /* SGE ring */
3061                 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
3062                 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
3063                                bnx2x_fp(bp, fp_index, rx_sge_mapping),
3064                                BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
3065         }
3066
3067         /* Tx */
3068         if (!skip_tx_queue(bp, fp_index)) {
3069                 /* fastpath tx rings: tx_buf tx_desc */
3070                 for_each_cos_in_tx_queue(fp, cos) {
3071                         struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
3072
3073                         DP(BNX2X_MSG_SP,
3074                            "freeing tx memory of fp %d cos %d cid %d\n",
3075                            fp_index, cos, txdata->cid);
3076
3077                         BNX2X_FREE(txdata->tx_buf_ring);
3078                         BNX2X_PCI_FREE(txdata->tx_desc_ring,
3079                                 txdata->tx_desc_mapping,
3080                                 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
3081                 }
3082         }
3083         /* end of fastpath */
3084 }
3085
3086 void bnx2x_free_fp_mem(struct bnx2x *bp)
3087 {
3088         int i;
3089         for_each_queue(bp, i)
3090                 bnx2x_free_fp_mem_at(bp, i);
3091 }
3092
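/* set shortcut pointers into the chip-specific (e2/e1x) status block so the
 * fast path does not have to dereference the union on every access
 */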
3093 static inline void set_sb_shortcuts(struct bnx2x *bp, int index)
3094 {
3095         union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
3096         if (!CHIP_IS_E1x(bp)) {
3097                 bnx2x_fp(bp, index, sb_index_values) =
3098                         (__le16 *)status_blk.e2_sb->sb.index_values;
3099                 bnx2x_fp(bp, index, sb_running_index) =
3100                         (__le16 *)status_blk.e2_sb->sb.running_index;
3101         } else {
3102                 bnx2x_fp(bp, index, sb_index_values) =
3103                         (__le16 *)status_blk.e1x_sb->sb.index_values;
3104                 bnx2x_fp(bp, index, sb_running_index) =
3105                         (__le16 *)status_blk.e1x_sb->sb.running_index;
3106         }
3107 }
3108
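/* allocate the status block, Tx rings and Rx rings of a single fastpath queue */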
3109 static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
3110 {
3111         union host_hc_status_block *sb;
3112         struct bnx2x_fastpath *fp = &bp->fp[index];
3113         int ring_size = 0;
3114         u8 cos;
3115         int rx_ring_size = 0;
3116
3117         /* if rx_ring_size was specified, use it; otherwise compute a default */
3118         if (!bp->rx_ring_size) {
3119
3120                 rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
3121
3122                 /* allocate at least the number of buffers required by the FW */
3123                 rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
3124                                      MIN_RX_SIZE_TPA, rx_ring_size);
3125
3126                 bp->rx_ring_size = rx_ring_size;
3127         } else
3128                 rx_ring_size = bp->rx_ring_size;
3129
3130         /* Common */
3131         sb = &bnx2x_fp(bp, index, status_blk);
3132 #ifdef BCM_CNIC
3133         if (!IS_FCOE_IDX(index)) {
3134 #endif
3135                 /* status blocks */
3136                 if (!CHIP_IS_E1x(bp))
3137                         BNX2X_PCI_ALLOC(sb->e2_sb,
3138                                 &bnx2x_fp(bp, index, status_blk_mapping),
3139                                 sizeof(struct host_hc_status_block_e2));
3140                 else
3141                         BNX2X_PCI_ALLOC(sb->e1x_sb,
3142                                 &bnx2x_fp(bp, index, status_blk_mapping),
3143                                 sizeof(struct host_hc_status_block_e1x));
3144 #ifdef BCM_CNIC
3145         }
3146 #endif
3147
3148         /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
3149          * set shortcuts for it.
3150          */
3151         if (!IS_FCOE_IDX(index))
3152                 set_sb_shortcuts(bp, index);
3153
3154         /* Tx */
3155         if (!skip_tx_queue(bp, index)) {
3156                 /* fastpath tx rings: tx_buf tx_desc */
3157                 for_each_cos_in_tx_queue(fp, cos) {
3158                         struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
3159
3160                         DP(BNX2X_MSG_SP, "allocating tx memory of "
3161                                          "fp %d cos %d\n",
3162                            index, cos);
3163
3164                         BNX2X_ALLOC(txdata->tx_buf_ring,
3165                                 sizeof(struct sw_tx_bd) * NUM_TX_BD);
3166                         BNX2X_PCI_ALLOC(txdata->tx_desc_ring,
3167                                 &txdata->tx_desc_mapping,
3168                                 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
3169                 }
3170         }
3171
3172         /* Rx */
3173         if (!skip_rx_queue(bp, index)) {
3174                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
3175                 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_buf_ring),
3176                                 sizeof(struct sw_rx_bd) * NUM_RX_BD);
3177                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_desc_ring),
3178                                 &bnx2x_fp(bp, index, rx_desc_mapping),
3179                                 sizeof(struct eth_rx_bd) * NUM_RX_BD);
3180
3181                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_comp_ring),
3182                                 &bnx2x_fp(bp, index, rx_comp_mapping),
3183                                 sizeof(struct eth_fast_path_rx_cqe) *
3184                                 NUM_RCQ_BD);
3185
3186                 /* SGE ring */
3187                 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring),
3188                                 sizeof(struct sw_rx_page) * NUM_RX_SGE);
3189                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_sge_ring),
3190                                 &bnx2x_fp(bp, index, rx_sge_mapping),
3191                                 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
3192                 /* RX BD ring */
3193                 bnx2x_set_next_page_rx_bd(fp);
3194
3195                 /* CQ ring */
3196                 bnx2x_set_next_page_rx_cq(fp);
3197
3198                 /* BDs */
3199                 ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
3200                 if (ring_size < rx_ring_size)
3201                         goto alloc_mem_err;
3202         }
3203
3204         return 0;
3205
3206 /* handles low memory cases */
3207 alloc_mem_err:
3208         BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
3209                                                 index, ring_size);
3210         /* FW will drop all packets if the queue is not big enough,
3211          * so in that case we disable the queue.
3212          * The minimal size is different for OOO, TPA and non-TPA queues.
3213          */
3214         if (ring_size < (fp->disable_tpa ?
3215                                 MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
3216                 /* release memory allocated for this queue */
3217                 bnx2x_free_fp_mem_at(bp, index);
3218                 return -ENOMEM;
3219         }
3220         return 0;
3221 }
3222
3223 int bnx2x_alloc_fp_mem(struct bnx2x *bp)
3224 {
3225         int i;
3226
3227         /*
3228          * 1. Allocate FP for leading - fatal if error
3229          * 2. {CNIC} Allocate FCoE FP - fatal if error
3230          * 3. {CNIC} Allocate OOO + FWD - disable OOO if error
3231          * 4. Allocate RSS - fix number of queues if error
3232          */
3233
3234         /* leading */
3235         if (bnx2x_alloc_fp_mem_at(bp, 0))
3236                 return -ENOMEM;
3237
3238 #ifdef BCM_CNIC
3239         if (!NO_FCOE(bp))
3240                 /* FCoE */
3241                 if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX))
3242                         /* we will fail the load process instead of
3243                          * marking NO_FCOE_FLAG
3244                          */
3245                         return -ENOMEM;
3246 #endif
3247
3248         /* RSS */
3249         for_each_nondefault_eth_queue(bp, i)
3250                 if (bnx2x_alloc_fp_mem_at(bp, i))
3251                         break;
3252
3253         /* handle memory failures */
3254         if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
3255                 int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
3256
3257                 WARN_ON(delta < 0);
3258 #ifdef BCM_CNIC
3259                 /*
3260                  * move the non-eth FPs next to the last eth FP;
3261                  * must be done in that order:
3262                  * FCOE_IDX < FWD_IDX < OOO_IDX
3263                  */
3264
3265                 /* move the FCoE fp even if NO_FCOE_FLAG is on */
3266                 bnx2x_move_fp(bp, FCOE_IDX, FCOE_IDX - delta);
3267 #endif
3268                 bp->num_queues -= delta;
3269                 BNX2X_ERR("Adjusted num of queues from %d to %d\n",
3270                           bp->num_queues + delta, bp->num_queues);
3271         }
3272
3273         return 0;
3274 }
3275
3276 void bnx2x_free_mem_bp(struct bnx2x *bp)
3277 {
3278         kfree(bp->fp);
3279         kfree(bp->msix_table);
3280         kfree(bp->ilt);
3281 }
3282
3283 int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
3284 {
3285         struct bnx2x_fastpath *fp;
3286         struct msix_entry *tbl;
3287         struct bnx2x_ilt *ilt;
3288         int msix_table_size = 0;
3289
3290         /*
3291          * The biggest MSI-X table we might need is the maximum number of fast
3292          * path IGU SBs plus the default SB (for the PF).
3293          */
3294         msix_table_size = bp->igu_sb_cnt + 1;
3295
3296         /* fp array: RSS plus CNIC related L2 queues */
3297         fp = kzalloc((BNX2X_MAX_RSS_COUNT(bp) + NON_ETH_CONTEXT_USE) *
3298                      sizeof(*fp), GFP_KERNEL);
3299         if (!fp)
3300                 goto alloc_err;
3301         bp->fp = fp;
3302
3303         /* msix table */
3304         tbl = kzalloc(msix_table_size * sizeof(*tbl), GFP_KERNEL);
3305         if (!tbl)
3306                 goto alloc_err;
3307         bp->msix_table = tbl;
3308
3309         /* ilt */
3310         ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
3311         if (!ilt)
3312                 goto alloc_err;
3313         bp->ilt = ilt;
3314
3315         return 0;
3316 alloc_err:
3317         bnx2x_free_mem_bp(bp);
3318         return -ENOMEM;
3320 }
3321
3322 int bnx2x_reload_if_running(struct net_device *dev)
3323 {
3324         struct bnx2x *bp = netdev_priv(dev);
3325
3326         if (unlikely(!netif_running(dev)))
3327                 return 0;
3328
3329         bnx2x_nic_unload(bp, UNLOAD_NORMAL);
3330         return bnx2x_nic_load(bp, LOAD_NORMAL);
3331 }
3332
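/* return the index of the currently used PHY: the internal PHY when only one
 * PHY is present, otherwise the external PHY driving (or selected to drive)
 * the link
 */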
3333 int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
3334 {
3335         u32 sel_phy_idx = 0;
3336         if (bp->link_params.num_phys <= 1)
3337                 return INT_PHY;
3338
3339         if (bp->link_vars.link_up) {
3340                 sel_phy_idx = EXT_PHY1;
3341                 /* In case the link is SERDES, check if EXT_PHY2 is the one */
3342                 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
3343                     (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
3344                         sel_phy_idx = EXT_PHY2;
3345         } else {
3346
3347                 switch (bnx2x_phy_selection(&bp->link_params)) {
3348                 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
3349                 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
3350                 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
3351                         sel_phy_idx = EXT_PHY1;
3352                         break;
3353                 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
3354                 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
3355                         sel_phy_idx = EXT_PHY2;
3356                         break;
3357                 }
3358         }
3359
3360         return sel_phy_idx;
3361 }
3362
3363 int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
3364 {
3365         u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
3366         /*
3367          * The selected active PHY is always the one after swapping (in case
3368          * PHY swapping is enabled), so when swapping is enabled we need to
3369          * reverse the configuration.
3370          */
3371
3372         if (bp->link_params.multi_phy_config &
3373             PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
3374                 if (sel_phy_idx == EXT_PHY1)
3375                         sel_phy_idx = EXT_PHY2;
3376                 else if (sel_phy_idx == EXT_PHY2)
3377                         sel_phy_idx = EXT_PHY1;
3378         }
3379         return LINK_CONFIG_IDX(sel_phy_idx);
3380 }
3381
3382 #if defined(NETDEV_FCOE_WWNN) && defined(BCM_CNIC)
3383 int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
3384 {
3385         struct bnx2x *bp = netdev_priv(dev);
3386         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
3387
3388         switch (type) {
3389         case NETDEV_FCOE_WWNN:
3390                 *wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
3391                                 cp->fcoe_wwn_node_name_lo);
3392                 break;
3393         case NETDEV_FCOE_WWPN:
3394                 *wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
3395                                 cp->fcoe_wwn_port_name_lo);
3396                 break;
3397         default:
3398                 return -EINVAL;
3399         }
3400
3401         return 0;
3402 }
3403 #endif
3404
3405 /* called with rtnl_lock */
3406 int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
3407 {
3408         struct bnx2x *bp = netdev_priv(dev);
3409
3410         if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
3411                 pr_err("Handling parity error recovery. Try again later\n");
3412                 return -EAGAIN;
3413         }
3414
3415         if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
3416             ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
3417                 return -EINVAL;
3418
3419         /* This does not race with packet allocation
3420          * because the actual alloc size is
3421          * only updated as part of load
3422          */
3423         dev->mtu = new_mtu;
3424
3425         return bnx2x_reload_if_running(dev);
3426 }
3427
3428 u32 bnx2x_fix_features(struct net_device *dev, u32 features)
3429 {
3430         struct bnx2x *bp = netdev_priv(dev);
3431
3432         /* TPA requires Rx CSUM offloading */
3433         if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa)
3434                 features &= ~NETIF_F_LRO;
3435
3436         return features;
3437 }
3438
3439 int bnx2x_set_features(struct net_device *dev, u32 features)
3440 {
3441         struct bnx2x *bp = netdev_priv(dev);
3442         u32 flags = bp->flags;
3443         bool bnx2x_reload = false;
3444
3445         if (features & NETIF_F_LRO)
3446                 flags |= TPA_ENABLE_FLAG;
3447         else
3448                 flags &= ~TPA_ENABLE_FLAG;
3449
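        /* NETIF_F_LOOPBACK selects internal MAC (BMAC) loopback; changing it
         * requires reloading the NIC
         */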
3450         if (features & NETIF_F_LOOPBACK) {
3451                 if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
3452                         bp->link_params.loopback_mode = LOOPBACK_BMAC;
3453                         bnx2x_reload = true;
3454                 }
3455         } else {
3456                 if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
3457                         bp->link_params.loopback_mode = LOOPBACK_NONE;
3458                         bnx2x_reload = true;
3459                 }
3460         }
3461
3462         if (flags ^ bp->flags) {
3463                 bp->flags = flags;
3464                 bnx2x_reload = true;
3465         }
3466
3467         if (bnx2x_reload) {
3468                 if (bp->recovery_state == BNX2X_RECOVERY_DONE)
3469                         return bnx2x_reload_if_running(dev);
3470                 /* else: bnx2x_nic_load() will be called at end of recovery */
3471         }
3472
3473         return 0;
3474 }
3475
3476 void bnx2x_tx_timeout(struct net_device *dev)
3477 {
3478         struct bnx2x *bp = netdev_priv(dev);
3479
3480 #ifdef BNX2X_STOP_ON_ERROR
3481         if (!bp->panic)
3482                 bnx2x_panic();
3483 #endif
3484
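        /* defer the actual reset to the sp_rtnl task */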
3485         smp_mb__before_clear_bit();
3486         set_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state);
3487         smp_mb__after_clear_bit();
3488
3489         /* This allows the netif to be shutdown gracefully before resetting */
3490         schedule_delayed_work(&bp->sp_rtnl_task, 0);
3491 }
3492
3493 int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
3494 {
3495         struct net_device *dev = pci_get_drvdata(pdev);
3496         struct bnx2x *bp;
3497
3498         if (!dev) {
3499                 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
3500                 return -ENODEV;
3501         }
3502         bp = netdev_priv(dev);
3503
3504         rtnl_lock();
3505
3506         pci_save_state(pdev);
3507
3508         if (!netif_running(dev)) {
3509                 rtnl_unlock();
3510                 return 0;
3511         }
3512
3513         netif_device_detach(dev);
3514
3515         bnx2x_nic_unload(bp, UNLOAD_CLOSE);
3516
3517         bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
3518
3519         rtnl_unlock();
3520
3521         return 0;
3522 }
3523
3524 int bnx2x_resume(struct pci_dev *pdev)
3525 {
3526         struct net_device *dev = pci_get_drvdata(pdev);
3527         struct bnx2x *bp;
3528         int rc;
3529
3530         if (!dev) {
3531                 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
3532                 return -ENODEV;
3533         }
3534         bp = netdev_priv(dev);
3535
3536         if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
3537                 pr_err("Handling parity error recovery. Try again later\n");
3538                 return -EAGAIN;
3539         }
3540
3541         rtnl_lock();
3542
3543         pci_restore_state(pdev);
3544
3545         if (!netif_running(dev)) {
3546                 rtnl_unlock();
3547                 return 0;
3548         }
3549
3550         bnx2x_set_power_state(bp, PCI_D0);
3551         netif_device_attach(dev);
3552
3553         /* Since the chip was reset, clear the FW sequence number */
3554         bp->fw_seq = 0;
3555         rc = bnx2x_nic_load(bp, LOAD_OPEN);
3556
3557         rtnl_unlock();
3558
3559         return rc;
3560 }
3561
3562
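/* write the CDU context validation values into an Ethernet connection
 * context
 */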
3563 void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
3564                               u32 cid)
3565 {
3566         /* ustorm cxt validation */
3567         cxt->ustorm_ag_context.cdu_usage =
3568                 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
3569                         CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
3570         /* xcontext validation */
3571         cxt->xstorm_ag_context.cdu_reserved =
3572                 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
3573                         CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
3574 }
3575
3576 static inline void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
3577                                              u8 fw_sb_id, u8 sb_index,
3578                                              u8 ticks)
3579 {
3580
3581         u32 addr = BAR_CSTRORM_INTMEM +
3582                    CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
3583         REG_WR8(bp, addr, ticks);
3584         DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d ticks %d\n",
3585                           port, fw_sb_id, sb_index, ticks);
3586 }
3587
3588 static inline void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
3589                                              u16 fw_sb_id, u8 sb_index,
3590                                              u8 disable)
3591 {
3592         u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
3593         u32 addr = BAR_CSTRORM_INTMEM +
3594                    CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
3595         u16 flags = REG_RD16(bp, addr);
3596         /* clear and set */
3597         flags &= ~HC_INDEX_DATA_HC_ENABLED;
3598         flags |= enable_flag;
3599         REG_WR16(bp, addr, flags);
3600         DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d disable %d\n",
3601                           port, fw_sb_id, sb_index, disable);
3602 }
3603
3604 void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
3605                                     u8 sb_index, u8 disable, u16 usec)
3606 {
3607         int port = BP_PORT(bp);
3608         u8 ticks = usec / BNX2X_BTR;
3609
3610         storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
3611
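        /* a zero interval also disables coalescing for this SB index */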
3612         disable = disable ? 1 : (usec ? 0 : 1);
3613         storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
3614 }