1 /* bnx2x_cmn.c: Broadcom Everest network driver.
2  *
3  * Copyright (c) 2007-2011 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10  * Written by: Eliezer Tamir
11  * Based on code from Michael Chan's bnx2 driver
12  * UDP CSUM errata workaround by Arik Gendelman
13  * Slowpath and fastpath rework by Vladislav Zolotarov
14  * Statistics and Link management by Yitchak Gertner
15  *
16  */
17
18 #include <linux/etherdevice.h>
19 #include <linux/if_vlan.h>
20 #include <linux/interrupt.h>
21 #include <linux/ip.h>
22 #include <net/ipv6.h>
23 #include <net/ip6_checksum.h>
24 #include <linux/firmware.h>
25 #include <linux/prefetch.h>
26 #include "bnx2x_cmn.h"
27 #include "bnx2x_init.h"
28 #include "bnx2x_sp.h"
29
30
31
32 /**
33  * bnx2x_bz_fp - zero content of the fastpath structure.
34  *
35  * @bp:         driver handle
36  * @index:      fastpath index to be zeroed
37  *
38  * Makes sure the contents of bp->fp[index].napi are kept
39  * intact.
40  */
41 static inline void bnx2x_bz_fp(struct bnx2x *bp, int index)
42 {
43         struct bnx2x_fastpath *fp = &bp->fp[index];
44         struct napi_struct orig_napi = fp->napi;
45         /* bzero bnx2x_fastpath contents */
46         memset(fp, 0, sizeof(*fp));
47
48         /* Restore the NAPI object as it has been already initialized */
49         fp->napi = orig_napi;
50
51         fp->bp = bp;
52         fp->index = index;
53         if (IS_ETH_FP(fp))
54                 fp->max_cos = bp->max_cos;
55         else
56                 /* Special queues support only one CoS */
57                 fp->max_cos = 1;
58
59         /*
60          * set the tpa flag for each queue. The tpa flag determines the queue's
61          * minimal size, so it must be set prior to queue memory allocation
62          */
63         fp->disable_tpa = ((bp->flags & TPA_ENABLE_FLAG) == 0);
64
65 #ifdef BCM_CNIC
66         /* We don't want TPA on FCoE, FWD and OOO L2 rings */
67         bnx2x_fcoe(bp, disable_tpa) = 1;
68 #endif
69 }
70
71 /**
72  * bnx2x_move_fp - move content of the fastpath structure.
73  *
74  * @bp:         driver handle
75  * @from:       source FP index
76  * @to:         destination FP index
77  *
78  * Makes sure the contents of bp->fp[to].napi are kept
79  * intact.
80  */
81 static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
82 {
83         struct bnx2x_fastpath *from_fp = &bp->fp[from];
84         struct bnx2x_fastpath *to_fp = &bp->fp[to];
85         struct napi_struct orig_napi = to_fp->napi;
86         /* Move bnx2x_fastpath contents */
87         memcpy(to_fp, from_fp, sizeof(*to_fp));
88         to_fp->index = to;
89
90         /* Restore the NAPI object as it has been already initialized */
91         to_fp->napi = orig_napi;
92 }
93
94 int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
95
96 /* free skb in the packet ring at pos idx
97  * return idx of last bd freed
98  */
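/* A rough sketch of the BD chain walked below: start BD -> parse BD ->
 * (optional TSO split header BD) -> data BDs.  Only the start BD and the
 * data BDs carry DMA mappings, which is why the parse and split-header BDs
 * are skipped without an unmap.
 */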
99 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
100                              u16 idx)
101 {
102         struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
103         struct eth_tx_start_bd *tx_start_bd;
104         struct eth_tx_bd *tx_data_bd;
105         struct sk_buff *skb = tx_buf->skb;
106         u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
107         int nbd;
108
109         /* prefetch skb end pointer to speed up dev_kfree_skb() */
110         prefetch(&skb->end);
111
112         DP(BNX2X_MSG_FP, "fp[%d]: pkt_idx %d  buff @(%p)->skb %p\n",
113            txdata->txq_index, idx, tx_buf, skb);
114
115         /* unmap first bd */
116         DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
117         tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
118         dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
119                          BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);
120
121
122         nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
123 #ifdef BNX2X_STOP_ON_ERROR
124         if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
125                 BNX2X_ERR("BAD nbd!\n");
126                 bnx2x_panic();
127         }
128 #endif
129         new_cons = nbd + tx_buf->first_bd;
130
131         /* Get the next bd */
132         bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
133
134         /* Skip a parse bd... */
135         --nbd;
136         bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
137
138         /* ...and the TSO split header bd since they have no mapping */
139         if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
140                 --nbd;
141                 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
142         }
143
144         /* now free frags */
145         while (nbd > 0) {
146
147                 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
148                 tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
149                 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
150                                BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
151                 if (--nbd)
152                         bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
153         }
154
155         /* release skb */
156         WARN_ON(!skb);
157         dev_kfree_skb_any(skb);
158         tx_buf->first_bd = 0;
159         tx_buf->skb = NULL;
160
161         return new_cons;
162 }
163
164 int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
165 {
166         struct netdev_queue *txq;
167         u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
168
169 #ifdef BNX2X_STOP_ON_ERROR
170         if (unlikely(bp->panic))
171                 return -1;
172 #endif
173
174         txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
175         hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
176         sw_cons = txdata->tx_pkt_cons;
177
178         while (sw_cons != hw_cons) {
179                 u16 pkt_cons;
180
181                 pkt_cons = TX_BD(sw_cons);
182
183                 DP(NETIF_MSG_TX_DONE, "queue[%d]: hw_cons %u  sw_cons %u "
184                                       " pkt_cons %u\n",
185                    txdata->txq_index, hw_cons, sw_cons, pkt_cons);
186
187                 bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons);
188                 sw_cons++;
189         }
190
191         txdata->tx_pkt_cons = sw_cons;
192         txdata->tx_bd_cons = bd_cons;
193
194         /* Need to make the tx_bd_cons update visible to start_xmit()
195          * before checking for netif_tx_queue_stopped().  Without the
196          * memory barrier, there is a small possibility that
197          * start_xmit() will miss it and cause the queue to be stopped
198          * forever.
199          * On the other hand we need an rmb() here to ensure the proper
200          * ordering of bit testing in the following
201          * netif_tx_queue_stopped(txq) call.
202          */
203         smp_mb();
204
205         if (unlikely(netif_tx_queue_stopped(txq))) {
206                 /* Taking tx_lock() is needed to prevent reenabling the queue
207                  * while it's empty. This could happen if rx_action() gets
208                  * suspended in bnx2x_tx_int() after the condition before
209                  * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
210                  *
211                  * stops the queue->sees fresh tx_bd_cons->releases the queue->
212                  * sends some packets consuming the whole queue again->
213                  * stops the queue
214                  */
215
216                 __netif_tx_lock(txq, smp_processor_id());
217
218                 if ((netif_tx_queue_stopped(txq)) &&
219                     (bp->state == BNX2X_STATE_OPEN) &&
220                     (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 3))
221                         netif_tx_wake_queue(txq);
222
223                 __netif_tx_unlock(txq);
224         }
225         return 0;
226 }
227
228 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
229                                              u16 idx)
230 {
231         u16 last_max = fp->last_max_sge;
232
233         if (SUB_S16(idx, last_max) > 0)
234                 fp->last_max_sge = idx;
235 }
236
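/* Rough bookkeeping sketch for the SGE mask: fp->sge_mask holds one bit per
 * RX SGE.  Bits are cleared below for the pages the FW has consumed; once a
 * whole 64-bit element is consumed it is set back to all-ones and the SGE
 * producer is advanced past it (the pages themselves are re-allocated on the
 * TPA stop path).
 */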
237 static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
238                                   struct eth_fast_path_rx_cqe *fp_cqe)
239 {
240         struct bnx2x *bp = fp->bp;
241         u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
242                                      le16_to_cpu(fp_cqe->len_on_bd)) >>
243                       SGE_PAGE_SHIFT;
244         u16 last_max, last_elem, first_elem;
245         u16 delta = 0;
246         u16 i;
247
248         if (!sge_len)
249                 return;
250
251         /* First mark all used pages */
252         for (i = 0; i < sge_len; i++)
253                 BIT_VEC64_CLEAR_BIT(fp->sge_mask,
254                         RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[i])));
255
256         DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
257            sge_len - 1, le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));
258
259         /* Here we assume that the last SGE index is the biggest */
260         prefetch((void *)(fp->sge_mask));
261         bnx2x_update_last_max_sge(fp,
262                 le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));
263
264         last_max = RX_SGE(fp->last_max_sge);
265         last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
266         first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
267
268         /* If ring is not full */
269         if (last_elem + 1 != first_elem)
270                 last_elem++;
271
272         /* Now update the prod */
273         for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
274                 if (likely(fp->sge_mask[i]))
275                         break;
276
277                 fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
278                 delta += BIT_VEC64_ELEM_SZ;
279         }
280
281         if (delta > 0) {
282                 fp->rx_sge_prod += delta;
283                 /* clear page-end entries */
284                 bnx2x_clear_sge_mask_next_elems(fp);
285         }
286
287         DP(NETIF_MSG_RX_STATUS,
288            "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
289            fp->last_max_sge, fp->rx_sge_prod);
290 }
291
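/* Rough TPA flow: a START CQE moves an aggregation bin from STOP to START
 * (or to ERROR if the replacement skb cannot be mapped); the matching
 * END_AGG CQE is handled by bnx2x_tpa_stop(), which hands the aggregated
 * skb to the stack and returns the bin to STOP.
 */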
292 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
293                             struct sk_buff *skb, u16 cons, u16 prod,
294                             struct eth_fast_path_rx_cqe *cqe)
295 {
296         struct bnx2x *bp = fp->bp;
297         struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
298         struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
299         struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
300         dma_addr_t mapping;
301         struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
302         struct sw_rx_bd *first_buf = &tpa_info->first_buf;
303
304         /* print error if current state != stop */
305         if (tpa_info->tpa_state != BNX2X_TPA_STOP)
306                 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
307
308         /* Try to map an empty skb from the aggregation info  */
309         mapping = dma_map_single(&bp->pdev->dev,
310                                  first_buf->skb->data,
311                                  fp->rx_buf_size, DMA_FROM_DEVICE);
312         /*
313          *  ...if it fails - move the skb from the consumer to the producer
314          *  and set the current aggregation state as ERROR to drop it
315          *  when TPA_STOP arrives.
316          */
317
318         if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
319                 /* Move the BD from the consumer to the producer */
320                 bnx2x_reuse_rx_skb(fp, cons, prod);
321                 tpa_info->tpa_state = BNX2X_TPA_ERROR;
322                 return;
323         }
324
325         /* move empty skb from pool to prod */
326         prod_rx_buf->skb = first_buf->skb;
327         dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
328         /* point prod_bd to new skb */
329         prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
330         prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
331
332         /* move partial skb from cons to pool (don't unmap yet) */
333         *first_buf = *cons_rx_buf;
334
335         /* mark bin state as START */
336         tpa_info->parsing_flags =
337                 le16_to_cpu(cqe->pars_flags.flags);
338         tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
339         tpa_info->tpa_state = BNX2X_TPA_START;
340         tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
341         tpa_info->placement_offset = cqe->placement_offset;
342
343 #ifdef BNX2X_STOP_ON_ERROR
344         fp->tpa_queue_used |= (1 << queue);
345 #ifdef _ASM_GENERIC_INT_L64_H
346         DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
347 #else
348         DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
349 #endif
350            fp->tpa_queue_used);
351 #endif
352 }
353
354 /* Timestamp option length allowed for TPA aggregation:
355  *
356  *              nop nop kind length echo val
357  */
358 #define TPA_TSTAMP_OPT_LEN      12
359 /**
360  * bnx2x_set_lro_mss - calculate the approximate value of the MSS
361  *
362  * @bp:                 driver handle
363  * @parsing_flags:      parsing flags from the START CQE
364  * @len_on_bd:          total length of the first packet for the
365  *                      aggregation.
366  *
367  * Approximate value of the MSS for this aggregation calculated using
368  * the first packet of it.
369  */
370 static inline u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags,
371                                     u16 len_on_bd)
372 {
373         /*
374          * TPA aggregation won't have either IP options or TCP options
375          * other than timestamp or IPv6 extension headers.
376          */
377         u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);
378
379         if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
380             PRS_FLAG_OVERETH_IPV6)
381                 hdrs_len += sizeof(struct ipv6hdr);
382         else /* IPv4 */
383                 hdrs_len += sizeof(struct iphdr);
384
385
386         /* Check if there was a TCP timestamp; if there is, it will
387          * always be 12 bytes long: nop nop kind length echo val.
388          *
389          * Otherwise FW would close the aggregation.
390          */
391         if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
392                 hdrs_len += TPA_TSTAMP_OPT_LEN;
393
394         return len_on_bd - hdrs_len;
395 }
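/* Worked example for the calculation above (a sketch assuming standard
 * header sizes): for an IPv4 aggregation with TCP timestamps and
 * len_on_bd = 1514, hdrs_len = 14 (ETH) + 20 (IP) + 20 (TCP) + 12 (tstamp)
 * = 66, giving an approximate MSS of 1448.
 */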
396
397 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
398                                u16 queue, struct sk_buff *skb,
399                                struct eth_end_agg_rx_cqe *cqe,
400                                u16 cqe_idx)
401 {
402         struct sw_rx_page *rx_pg, old_rx_pg;
403         u32 i, frag_len, frag_size, pages;
404         int err;
405         int j;
406         struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
407         u16 len_on_bd = tpa_info->len_on_bd;
408
409         frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;
410         pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
411
412         /* This is needed in order to enable forwarding support */
413         if (frag_size)
414                 skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp,
415                                         tpa_info->parsing_flags, len_on_bd);
416
417 #ifdef BNX2X_STOP_ON_ERROR
418         if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
419                 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
420                           pages, cqe_idx);
421                 BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
422                 bnx2x_panic();
423                 return -EINVAL;
424         }
425 #endif
426
427         /* Run through the SGL and compose the fragmented skb */
428         for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
429                 u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));
430
431                 /* FW gives the indices of the SGE as if the ring is an array
432                    (meaning that "next" element will consume 2 indices) */
433                 frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
434                 rx_pg = &fp->rx_page_ring[sge_idx];
435                 old_rx_pg = *rx_pg;
436
437                 /* If we fail to allocate a substitute page, we simply stop
438                    where we are and drop the whole packet */
439                 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
440                 if (unlikely(err)) {
441                         fp->eth_q_stats.rx_skb_alloc_failed++;
442                         return err;
443                 }
444
445                 /* Unmap the page as we are going to pass it to the stack */
446                 dma_unmap_page(&bp->pdev->dev,
447                                dma_unmap_addr(&old_rx_pg, mapping),
448                                SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
449
450                 /* Add one frag and update the appropriate fields in the skb */
451                 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
452
453                 skb->data_len += frag_len;
454                 skb->truesize += frag_len;
455                 skb->len += frag_len;
456
457                 frag_size -= frag_len;
458         }
459
460         return 0;
461 }
462
463 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
464                            u16 queue, struct eth_end_agg_rx_cqe *cqe,
465                            u16 cqe_idx)
466 {
467         struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
468         struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
469         u8 pad = tpa_info->placement_offset;
470         u16 len = tpa_info->len_on_bd;
471         struct sk_buff *skb = rx_buf->skb;
472         /* alloc new skb */
473         struct sk_buff *new_skb;
474         u8 old_tpa_state = tpa_info->tpa_state;
475
476         tpa_info->tpa_state = BNX2X_TPA_STOP;
477
478         /* If there was an error during the handling of the TPA_START -
479          * drop this aggregation.
480          */
481         if (old_tpa_state == BNX2X_TPA_ERROR)
482                 goto drop;
483
484         /* Try to allocate the new skb */
485         new_skb = netdev_alloc_skb(bp->dev, fp->rx_buf_size);
486
487         /* Unmap skb in the pool anyway, as we are going to change
488            pool entry status to BNX2X_TPA_STOP even if new skb allocation
489            fails. */
490         dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
491                          fp->rx_buf_size, DMA_FROM_DEVICE);
492
493         if (likely(new_skb)) {
494                 prefetch(skb);
495                 prefetch(((char *)(skb)) + L1_CACHE_BYTES);
496
497 #ifdef BNX2X_STOP_ON_ERROR
498                 if (pad + len > fp->rx_buf_size) {
499                         BNX2X_ERR("skb_put is about to fail...  "
500                                   "pad %d  len %d  rx_buf_size %d\n",
501                                   pad, len, fp->rx_buf_size);
502                         bnx2x_panic();
503                         return;
504                 }
505 #endif
506
507                 skb_reserve(skb, pad);
508                 skb_put(skb, len);
509
510                 skb->protocol = eth_type_trans(skb, bp->dev);
511                 skb->ip_summed = CHECKSUM_UNNECESSARY;
512
513                 if (!bnx2x_fill_frag_skb(bp, fp, queue, skb, cqe, cqe_idx)) {
514                         if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
515                                 __vlan_hwaccel_put_tag(skb, tpa_info->vlan_tag);
516                         napi_gro_receive(&fp->napi, skb);
517                 } else {
518                         DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
519                            " - dropping packet!\n");
520                         dev_kfree_skb_any(skb);
521                 }
522
523
524                 /* put new skb in bin */
525                 rx_buf->skb = new_skb;
526
527                 return;
528         }
529
530 drop:
531         /* drop the packet and keep the buffer in the bin */
532         DP(NETIF_MSG_RX_STATUS,
533            "Failed to allocate or map a new skb - dropping packet!\n");
534         fp->eth_q_stats.rx_skb_alloc_failed++;
535 }
536
537 /* Set Toeplitz hash value in the skb using the value from the
538  * CQE (calculated by HW).
539  */
540 static inline void bnx2x_set_skb_rxhash(struct bnx2x *bp, union eth_rx_cqe *cqe,
541                                         struct sk_buff *skb)
542 {
543         /* Set Toeplitz hash from CQE */
544         if ((bp->dev->features & NETIF_F_RXHASH) &&
545             (cqe->fast_path_cqe.status_flags &
546              ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
547                 skb->rxhash =
548                 le32_to_cpu(cqe->fast_path_cqe.rss_hash_result);
549 }
550
551 int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
552 {
553         struct bnx2x *bp = fp->bp;
554         u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
555         u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
556         int rx_pkt = 0;
557
558 #ifdef BNX2X_STOP_ON_ERROR
559         if (unlikely(bp->panic))
560                 return 0;
561 #endif
562
563         /* CQ "next element" is of the size of the regular element,
564            that's why it's ok here */
565         hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
566         if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
567                 hw_comp_cons++;
568
569         bd_cons = fp->rx_bd_cons;
570         bd_prod = fp->rx_bd_prod;
571         bd_prod_fw = bd_prod;
572         sw_comp_cons = fp->rx_comp_cons;
573         sw_comp_prod = fp->rx_comp_prod;
574
575         /* Memory barrier necessary as speculative reads of the rx
576          * buffer can be ahead of the index in the status block
577          */
578         rmb();
579
580         DP(NETIF_MSG_RX_STATUS,
581            "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
582            fp->index, hw_comp_cons, sw_comp_cons);
583
584         while (sw_comp_cons != hw_comp_cons) {
585                 struct sw_rx_bd *rx_buf = NULL;
586                 struct sk_buff *skb;
587                 union eth_rx_cqe *cqe;
588                 struct eth_fast_path_rx_cqe *cqe_fp;
589                 u8 cqe_fp_flags;
590                 enum eth_rx_cqe_type cqe_fp_type;
591                 u16 len, pad;
592
593 #ifdef BNX2X_STOP_ON_ERROR
594                 if (unlikely(bp->panic))
595                         return 0;
596 #endif
597
598                 comp_ring_cons = RCQ_BD(sw_comp_cons);
599                 bd_prod = RX_BD(bd_prod);
600                 bd_cons = RX_BD(bd_cons);
601
602                 /* Prefetch the page containing the BD descriptor
603                    at the producer's index. It will be needed when a new
604                    skb is allocated */
605                 prefetch((void *)(PAGE_ALIGN((unsigned long)
606                                              (&fp->rx_desc_ring[bd_prod])) -
607                                   PAGE_SIZE + 1));
608
609                 cqe = &fp->rx_comp_ring[comp_ring_cons];
610                 cqe_fp = &cqe->fast_path_cqe;
611                 cqe_fp_flags = cqe_fp->type_error_flags;
612                 cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
613
614                 DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
615                    "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
616                    cqe_fp_flags, cqe_fp->status_flags,
617                    le32_to_cpu(cqe_fp->rss_hash_result),
618                    le16_to_cpu(cqe_fp->vlan_tag), le16_to_cpu(cqe_fp->pkt_len));
619
620                 /* is this a slowpath msg? */
621                 if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
622                         bnx2x_sp_event(fp, cqe);
623                         goto next_cqe;
624
625                 /* this is an rx packet */
626                 } else {
627                         rx_buf = &fp->rx_buf_ring[bd_cons];
628                         skb = rx_buf->skb;
629                         prefetch(skb);
630
631                         if (!CQE_TYPE_FAST(cqe_fp_type)) {
632 #ifdef BNX2X_STOP_ON_ERROR
633                                 /* sanity check */
634                                 if (fp->disable_tpa &&
635                                     (CQE_TYPE_START(cqe_fp_type) ||
636                                      CQE_TYPE_STOP(cqe_fp_type)))
637                                         BNX2X_ERR("START/STOP packet while "
638                                                   "disable_tpa type %x\n",
639                                                   CQE_TYPE(cqe_fp_type));
640 #endif
641
642                                 if (CQE_TYPE_START(cqe_fp_type)) {
643                                         u16 queue = cqe_fp->queue_index;
644                                         DP(NETIF_MSG_RX_STATUS,
645                                            "calling tpa_start on queue %d\n",
646                                            queue);
647
648                                         bnx2x_tpa_start(fp, queue, skb,
649                                                         bd_cons, bd_prod,
650                                                         cqe_fp);
651
652                                         /* Set Toeplitz hash for LRO skb */
653                                         bnx2x_set_skb_rxhash(bp, cqe, skb);
654
655                                         goto next_rx;
656
657                                 } else {
658                                         u16 queue =
659                                                 cqe->end_agg_cqe.queue_index;
660                                         DP(NETIF_MSG_RX_STATUS,
661                                            "calling tpa_stop on queue %d\n",
662                                            queue);
663
664                                         bnx2x_tpa_stop(bp, fp, queue,
665                                                        &cqe->end_agg_cqe,
666                                                        comp_ring_cons);
667 #ifdef BNX2X_STOP_ON_ERROR
668                                         if (bp->panic)
669                                                 return 0;
670 #endif
671
672                                         bnx2x_update_sge_prod(fp, cqe_fp);
673                                         goto next_cqe;
674                                 }
675                         }
676                         /* non TPA */
677                         len = le16_to_cpu(cqe_fp->pkt_len);
678                         pad = cqe_fp->placement_offset;
679                         dma_sync_single_for_cpu(&bp->pdev->dev,
680                                         dma_unmap_addr(rx_buf, mapping),
681                                                        pad + RX_COPY_THRESH,
682                                                        DMA_FROM_DEVICE);
683                         prefetch(((char *)(skb)) + L1_CACHE_BYTES);
684
685                         /* is this an error packet? */
686                         if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
687                                 DP(NETIF_MSG_RX_ERR,
688                                    "ERROR  flags %x  rx packet %u\n",
689                                    cqe_fp_flags, sw_comp_cons);
690                                 fp->eth_q_stats.rx_err_discard_pkt++;
691                                 goto reuse_rx;
692                         }
693
694                         /* Since we don't have a jumbo ring,
695                          * copy small packets if mtu > 1500
696                          */
697                         if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
698                             (len <= RX_COPY_THRESH)) {
699                                 struct sk_buff *new_skb;
700
701                                 new_skb = netdev_alloc_skb(bp->dev, len + pad);
702                                 if (new_skb == NULL) {
703                                         DP(NETIF_MSG_RX_ERR,
704                                            "ERROR  packet dropped "
705                                            "because of alloc failure\n");
706                                         fp->eth_q_stats.rx_skb_alloc_failed++;
707                                         goto reuse_rx;
708                                 }
709
710                                 /* aligned copy */
711                                 skb_copy_from_linear_data_offset(skb, pad,
712                                                     new_skb->data + pad, len);
713                                 skb_reserve(new_skb, pad);
714                                 skb_put(new_skb, len);
715
716                                 bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod);
717
718                                 skb = new_skb;
719
720                         } else
721                         if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
722                                 dma_unmap_single(&bp->pdev->dev,
723                                         dma_unmap_addr(rx_buf, mapping),
724                                                  fp->rx_buf_size,
725                                                  DMA_FROM_DEVICE);
726                                 skb_reserve(skb, pad);
727                                 skb_put(skb, len);
728
729                         } else {
730                                 DP(NETIF_MSG_RX_ERR,
731                                    "ERROR  packet dropped because "
732                                    "of alloc failure\n");
733                                 fp->eth_q_stats.rx_skb_alloc_failed++;
734 reuse_rx:
735                                 bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod);
736                                 goto next_rx;
737                         }
738
739                         skb->protocol = eth_type_trans(skb, bp->dev);
740
741                         /* Set Toeplitz hash for a non-LRO skb */
742                         bnx2x_set_skb_rxhash(bp, cqe, skb);
743
744                         skb_checksum_none_assert(skb);
745
746                         if (bp->dev->features & NETIF_F_RXCSUM) {
747
748                                 if (likely(BNX2X_RX_CSUM_OK(cqe)))
749                                         skb->ip_summed = CHECKSUM_UNNECESSARY;
750                                 else
751                                         fp->eth_q_stats.hw_csum_err++;
752                         }
753                 }
754
755                 skb_record_rx_queue(skb, fp->index);
756
757                 if (le16_to_cpu(cqe_fp->pars_flags.flags) &
758                     PARSING_FLAGS_VLAN)
759                         __vlan_hwaccel_put_tag(skb,
760                                                le16_to_cpu(cqe_fp->vlan_tag));
761                 napi_gro_receive(&fp->napi, skb);
762
763
764 next_rx:
765                 rx_buf->skb = NULL;
766
767                 bd_cons = NEXT_RX_IDX(bd_cons);
768                 bd_prod = NEXT_RX_IDX(bd_prod);
769                 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
770                 rx_pkt++;
771 next_cqe:
772                 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
773                 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
774
775                 if (rx_pkt == budget)
776                         break;
777         } /* while */
778
779         fp->rx_bd_cons = bd_cons;
780         fp->rx_bd_prod = bd_prod_fw;
781         fp->rx_comp_cons = sw_comp_cons;
782         fp->rx_comp_prod = sw_comp_prod;
783
784         /* Update producers */
785         bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
786                              fp->rx_sge_prod);
787
788         fp->rx_pkt += rx_pkt;
789         fp->rx_calls++;
790
791         return rx_pkt;
792 }
793
794 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
795 {
796         struct bnx2x_fastpath *fp = fp_cookie;
797         struct bnx2x *bp = fp->bp;
798         u8 cos;
799
800         DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB "
801                          "[fp %d fw_sd %d igusb %d]\n",
802            fp->index, fp->fw_sb_id, fp->igu_sb_id);
803         bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
804
805 #ifdef BNX2X_STOP_ON_ERROR
806         if (unlikely(bp->panic))
807                 return IRQ_HANDLED;
808 #endif
809
810         /* Handle Rx and Tx according to MSI-X vector */
811         prefetch(fp->rx_cons_sb);
812
813         for_each_cos_in_tx_queue(fp, cos)
814                 prefetch(fp->txdata[cos].tx_cons_sb);
815
816         prefetch(&fp->sb_running_index[SM_RX_ID]);
817         napi_schedule(&bnx2x_fp(bp, fp->index, napi));
818
819         return IRQ_HANDLED;
820 }
821
822 /* HW Lock for shared dual port PHYs */
823 void bnx2x_acquire_phy_lock(struct bnx2x *bp)
824 {
825         mutex_lock(&bp->port.phy_mutex);
826
827         if (bp->port.need_hw_lock)
828                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
829 }
830
831 void bnx2x_release_phy_lock(struct bnx2x *bp)
832 {
833         if (bp->port.need_hw_lock)
834                 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
835
836         mutex_unlock(&bp->port.phy_mutex);
837 }
838
839 /* calculates MF speed according to current linespeed and MF configuration */
840 u16 bnx2x_get_mf_speed(struct bnx2x *bp)
841 {
842         u16 line_speed = bp->link_vars.line_speed;
843         if (IS_MF(bp)) {
844                 u16 maxCfg = bnx2x_extract_max_cfg(bp,
845                                                    bp->mf_config[BP_VN(bp)]);
846
847                 /* Calculate the current MAX line speed limit for the MF
848                  * devices
849                  */
850                 if (IS_MF_SI(bp))
851                         line_speed = (line_speed * maxCfg) / 100;
852                 else { /* SD mode */
853                         u16 vn_max_rate = maxCfg * 100;
854
855                         if (vn_max_rate < line_speed)
856                                 line_speed = vn_max_rate;
857                 }
858         }
859
860         return line_speed;
861 }
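/* Sketch of the arithmetic above: with maxCfg = 30 and a 10000 Mbps link,
 * SI mode reports 10000 * 30 / 100 = 3000 Mbps, while SD mode caps the
 * speed at min(30 * 100, 10000) = 3000 Mbps.
 */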
862
863 /**
864  * bnx2x_fill_report_data - fill link report data to report
865  *
866  * @bp:         driver handle
867  * @data:       link state to update
868  *
869  * It uses non-atomic bit operations because it is called under the mutex.
870  */
871 static inline void bnx2x_fill_report_data(struct bnx2x *bp,
872                                           struct bnx2x_link_report_data *data)
873 {
874         u16 line_speed = bnx2x_get_mf_speed(bp);
875
876         memset(data, 0, sizeof(*data));
877
878         /* Fill the report data: effective line speed */
879         data->line_speed = line_speed;
880
881         /* Link is down */
882         if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
883                 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
884                           &data->link_report_flags);
885
886         /* Full DUPLEX */
887         if (bp->link_vars.duplex == DUPLEX_FULL)
888                 __set_bit(BNX2X_LINK_REPORT_FD, &data->link_report_flags);
889
890         /* Rx Flow Control is ON */
891         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
892                 __set_bit(BNX2X_LINK_REPORT_RX_FC_ON, &data->link_report_flags);
893
894         /* Tx Flow Control is ON */
895         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
896                 __set_bit(BNX2X_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
897 }
898
899 /**
900  * bnx2x_link_report - report link status to OS.
901  *
902  * @bp:         driver handle
903  *
904  * Calls the __bnx2x_link_report() under the same locking scheme
905  * as the link/PHY state managing code to ensure consistent link
906  * reporting.
907  */
908
909 void bnx2x_link_report(struct bnx2x *bp)
910 {
911         bnx2x_acquire_phy_lock(bp);
912         __bnx2x_link_report(bp);
913         bnx2x_release_phy_lock(bp);
914 }
915
916 /**
917  * __bnx2x_link_report - report link status to OS.
918  *
919  * @bp:         driver handle
920  *
921  * Non-atomic implementation.
922  * Should be called under the phy_lock.
923  */
924 void __bnx2x_link_report(struct bnx2x *bp)
925 {
926         struct bnx2x_link_report_data cur_data;
927
928         /* reread mf_cfg */
929         if (!CHIP_IS_E1(bp))
930                 bnx2x_read_mf_cfg(bp);
931
932         /* Read the current link report info */
933         bnx2x_fill_report_data(bp, &cur_data);
934
935         /* Don't report link down or exactly the same link status twice */
936         if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
937             (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
938                       &bp->last_reported_link.link_report_flags) &&
939              test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
940                       &cur_data.link_report_flags)))
941                 return;
942
943         bp->link_cnt++;
944
945         /* We are going to report new link parameters now -
946          * remember the current data for the next time.
947          */
948         memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));
949
950         if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
951                      &cur_data.link_report_flags)) {
952                 netif_carrier_off(bp->dev);
953                 netdev_err(bp->dev, "NIC Link is Down\n");
954                 return;
955         } else {
956                 netif_carrier_on(bp->dev);
957                 netdev_info(bp->dev, "NIC Link is Up, ");
958                 pr_cont("%d Mbps ", cur_data.line_speed);
959
960                 if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
961                                        &cur_data.link_report_flags))
962                         pr_cont("full duplex");
963                 else
964                         pr_cont("half duplex");
965
966                 /* Handle the FC at the end so that only these flags could
967                  * possibly be set. This way we can easily check whether any
968                  * FC is enabled.
969                  */
970                 if (cur_data.link_report_flags) {
971                         if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
972                                      &cur_data.link_report_flags)) {
973                                 pr_cont(", receive ");
974                                 if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
975                                      &cur_data.link_report_flags))
976                                         pr_cont("& transmit ");
977                         } else {
978                                 pr_cont(", transmit ");
979                         }
980                         pr_cont("flow control ON");
981                 }
982                 pr_cont("\n");
983         }
984 }
985
986 void bnx2x_init_rx_rings(struct bnx2x *bp)
987 {
988         int func = BP_FUNC(bp);
989         int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
990                                               ETH_MAX_AGGREGATION_QUEUES_E1H_E2;
991         u16 ring_prod;
992         int i, j;
993
994         /* Allocate TPA resources */
995         for_each_rx_queue(bp, j) {
996                 struct bnx2x_fastpath *fp = &bp->fp[j];
997
998                 DP(NETIF_MSG_IFUP,
999                    "mtu %d  rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
1000
1001                 if (!fp->disable_tpa) {
1002                         /* Fill the per-aggregation pool */
1003                         for (i = 0; i < max_agg_queues; i++) {
1004                                 struct bnx2x_agg_info *tpa_info =
1005                                         &fp->tpa_info[i];
1006                                 struct sw_rx_bd *first_buf =
1007                                         &tpa_info->first_buf;
1008
1009                                 first_buf->skb = netdev_alloc_skb(bp->dev,
1010                                                        fp->rx_buf_size);
1011                                 if (!first_buf->skb) {
1012                                         BNX2X_ERR("Failed to allocate TPA "
1013                                                   "skb pool for queue[%d] - "
1014                                                   "disabling TPA on this "
1015                                                   "queue!\n", j);
1016                                         bnx2x_free_tpa_pool(bp, fp, i);
1017                                         fp->disable_tpa = 1;
1018                                         break;
1019                                 }
1020                                 dma_unmap_addr_set(first_buf, mapping, 0);
1021                                 tpa_info->tpa_state = BNX2X_TPA_STOP;
1022                         }
1023
1024                         /* "next page" elements initialization */
1025                         bnx2x_set_next_page_sgl(fp);
1026
1027                         /* set SGEs bit mask */
1028                         bnx2x_init_sge_ring_bit_mask(fp);
1029
1030                         /* Allocate SGEs and initialize the ring elements */
1031                         for (i = 0, ring_prod = 0;
1032                              i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
1033
1034                                 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
1035                                         BNX2X_ERR("was only able to allocate "
1036                                                   "%d rx sges\n", i);
1037                                         BNX2X_ERR("disabling TPA for "
1038                                                   "queue[%d]\n", j);
1039                                         /* Cleanup already allocated elements */
1040                                         bnx2x_free_rx_sge_range(bp, fp,
1041                                                                 ring_prod);
1042                                         bnx2x_free_tpa_pool(bp, fp,
1043                                                             max_agg_queues);
1044                                         fp->disable_tpa = 1;
1045                                         ring_prod = 0;
1046                                         break;
1047                                 }
1048                                 ring_prod = NEXT_SGE_IDX(ring_prod);
1049                         }
1050
1051                         fp->rx_sge_prod = ring_prod;
1052                 }
1053         }
1054
1055         for_each_rx_queue(bp, j) {
1056                 struct bnx2x_fastpath *fp = &bp->fp[j];
1057
1058                 fp->rx_bd_cons = 0;
1059
1060                 /* Activate BD ring */
1061                 /* Warning!
1062                  * This will generate an interrupt (to the TSTORM);
1063                  * it must only be done after the chip is initialized.
1064                  */
1065                 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1066                                      fp->rx_sge_prod);
1067
1068                 if (j != 0)
1069                         continue;
1070
1071                 if (CHIP_IS_E1(bp)) {
1072                         REG_WR(bp, BAR_USTRORM_INTMEM +
1073                                USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
1074                                U64_LO(fp->rx_comp_mapping));
1075                         REG_WR(bp, BAR_USTRORM_INTMEM +
1076                                USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
1077                                U64_HI(fp->rx_comp_mapping));
1078                 }
1079         }
1080 }
1081
1082 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
1083 {
1084         int i;
1085         u8 cos;
1086
1087         for_each_tx_queue(bp, i) {
1088                 struct bnx2x_fastpath *fp = &bp->fp[i];
1089                 for_each_cos_in_tx_queue(fp, cos) {
1090                         struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
1091
1092                         u16 bd_cons = txdata->tx_bd_cons;
1093                         u16 sw_prod = txdata->tx_pkt_prod;
1094                         u16 sw_cons = txdata->tx_pkt_cons;
1095
1096                         while (sw_cons != sw_prod) {
1097                                 bd_cons = bnx2x_free_tx_pkt(bp, txdata,
1098                                                             TX_BD(sw_cons));
1099                                 sw_cons++;
1100                         }
1101                 }
1102         }
1103 }
1104
1105 static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
1106 {
1107         struct bnx2x *bp = fp->bp;
1108         int i;
1109
1110         /* ring wasn't allocated */
1111         if (fp->rx_buf_ring == NULL)
1112                 return;
1113
1114         for (i = 0; i < NUM_RX_BD; i++) {
1115                 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
1116                 struct sk_buff *skb = rx_buf->skb;
1117
1118                 if (skb == NULL)
1119                         continue;
1120                 dma_unmap_single(&bp->pdev->dev,
1121                                  dma_unmap_addr(rx_buf, mapping),
1122                                  fp->rx_buf_size, DMA_FROM_DEVICE);
1123
1124                 rx_buf->skb = NULL;
1125                 dev_kfree_skb(skb);
1126         }
1127 }
1128
1129 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1130 {
1131         int j;
1132
1133         for_each_rx_queue(bp, j) {
1134                 struct bnx2x_fastpath *fp = &bp->fp[j];
1135
1136                 bnx2x_free_rx_bds(fp);
1137
1138                 if (!fp->disable_tpa)
1139                         bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
1140                                             ETH_MAX_AGGREGATION_QUEUES_E1 :
1141                                             ETH_MAX_AGGREGATION_QUEUES_E1H_E2);
1142         }
1143 }
1144
1145 void bnx2x_free_skbs(struct bnx2x *bp)
1146 {
1147         bnx2x_free_tx_skbs(bp);
1148         bnx2x_free_rx_skbs(bp);
1149 }
1150
1151 void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
1152 {
1153         /* load old values */
1154         u32 mf_cfg = bp->mf_config[BP_VN(bp)];
1155
1156         if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
1157                 /* leave all but MAX value */
1158                 mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
1159
1160                 /* set new MAX value */
1161                 mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
1162                                 & FUNC_MF_CFG_MAX_BW_MASK;
1163
1164                 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
1165         }
1166 }
1167
1168 /**
1169  * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
1170  *
1171  * @bp:         driver handle
1172  * @nvecs:      number of vectors to be released
1173  */
1174 static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
1175 {
1176         int i, offset = 0;
1177
1178         if (nvecs == offset)
1179                 return;
1180         free_irq(bp->msix_table[offset].vector, bp->dev);
1181         DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
1182            bp->msix_table[offset].vector);
1183         offset++;
1184 #ifdef BCM_CNIC
1185         if (nvecs == offset)
1186                 return;
1187         offset++;
1188 #endif
1189
1190         for_each_eth_queue(bp, i) {
1191                 if (nvecs == offset)
1192                         return;
1193                 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d "
1194                    "irq\n", i, bp->msix_table[offset].vector);
1195
1196                 free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
1197         }
1198 }
1199
1200 void bnx2x_free_irq(struct bnx2x *bp)
1201 {
1202         if (bp->flags & USING_MSIX_FLAG)
1203                 bnx2x_free_msix_irqs(bp, BNX2X_NUM_ETH_QUEUES(bp) +
1204                                      CNIC_PRESENT + 1);
1205         else if (bp->flags & USING_MSI_FLAG)
1206                 free_irq(bp->pdev->irq, bp->dev);
1207         else
1208                 free_irq(bp->pdev->irq, bp->dev);
1209 }
1210
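/* MSI-X vector layout assumed below: entry 0 is the slowpath interrupt,
 * entry 1 the CNIC one (when BCM_CNIC is defined), followed by one vector
 * per ETH fastpath queue.
 */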
1211 int bnx2x_enable_msix(struct bnx2x *bp)
1212 {
1213         int msix_vec = 0, i, rc, req_cnt;
1214
1215         bp->msix_table[msix_vec].entry = msix_vec;
1216         DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n",
1217            bp->msix_table[0].entry);
1218         msix_vec++;
1219
1220 #ifdef BCM_CNIC
1221         bp->msix_table[msix_vec].entry = msix_vec;
1222         DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d (CNIC)\n",
1223            bp->msix_table[msix_vec].entry, bp->msix_table[msix_vec].entry);
1224         msix_vec++;
1225 #endif
1226         /* We need separate vectors for ETH queues only (not FCoE) */
1227         for_each_eth_queue(bp, i) {
1228                 bp->msix_table[msix_vec].entry = msix_vec;
1229                 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
1230                    "(fastpath #%u)\n", msix_vec, msix_vec, i);
1231                 msix_vec++;
1232         }
1233
1234         req_cnt = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_PRESENT + 1;
1235
1236         rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], req_cnt);
1237
1238         /*
1239          * reconfigure number of tx/rx queues according to available
1240          * MSI-X vectors
1241          */
1242         if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
1243                 /* how many fewer vectors will we have? */
1244                 int diff = req_cnt - rc;
1245
1246                 DP(NETIF_MSG_IFUP,
1247                    "Trying to use less MSI-X vectors: %d\n", rc);
1248
1249                 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
1250
1251                 if (rc) {
1252                         DP(NETIF_MSG_IFUP,
1253                            "MSI-X is not attainable  rc %d\n", rc);
1254                         return rc;
1255                 }
1256                 /*
1257                  * decrease number of queues by number of unallocated entries
1258                  */
1259                 bp->num_queues -= diff;
1260
1261                 DP(NETIF_MSG_IFUP, "New queue configuration set: %d\n",
1262                                   bp->num_queues);
1263         } else if (rc) {
1264                 /* fall back to INTx if not enough memory */
1265                 if (rc == -ENOMEM)
1266                         bp->flags |= DISABLE_MSI_FLAG;
1267                 DP(NETIF_MSG_IFUP, "MSI-X is not attainable  rc %d\n", rc);
1268                 return rc;
1269         }
1270
1271         bp->flags |= USING_MSIX_FLAG;
1272
1273         return 0;
1274 }
1275
1276 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1277 {
1278         int i, rc, offset = 0;
1279
1280         rc = request_irq(bp->msix_table[offset++].vector,
1281                          bnx2x_msix_sp_int, 0,
1282                          bp->dev->name, bp->dev);
1283         if (rc) {
1284                 BNX2X_ERR("request sp irq failed\n");
1285                 return -EBUSY;
1286         }
1287
1288 #ifdef BCM_CNIC
1289         offset++;
1290 #endif
1291         for_each_eth_queue(bp, i) {
1292                 struct bnx2x_fastpath *fp = &bp->fp[i];
1293                 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1294                          bp->dev->name, i);
1295
1296                 rc = request_irq(bp->msix_table[offset].vector,
1297                                  bnx2x_msix_fp_int, 0, fp->name, fp);
1298                 if (rc) {
1299                         BNX2X_ERR("request fp #%d irq (%d) failed  rc %d\n", i,
1300                               bp->msix_table[offset].vector, rc);
1301                         bnx2x_free_msix_irqs(bp, offset);
1302                         return -EBUSY;
1303                 }
1304
1305                 offset++;
1306         }
1307
1308         i = BNX2X_NUM_ETH_QUEUES(bp);
1309         offset = 1 + CNIC_PRESENT;
1310         netdev_info(bp->dev, "using MSI-X  IRQs: sp %d  fp[%d] %d"
1311                " ... fp[%d] %d\n",
1312                bp->msix_table[0].vector,
1313                0, bp->msix_table[offset].vector,
1314                i - 1, bp->msix_table[offset + i - 1].vector);
1315
1316         return 0;
1317 }
1318
1319 int bnx2x_enable_msi(struct bnx2x *bp)
1320 {
1321         int rc;
1322
1323         rc = pci_enable_msi(bp->pdev);
1324         if (rc) {
1325                 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
1326                 return -1;
1327         }
1328         bp->flags |= USING_MSI_FLAG;
1329
1330         return 0;
1331 }
1332
1333 static int bnx2x_req_irq(struct bnx2x *bp)
1334 {
1335         unsigned long flags;
1336         int rc;
1337
1338         if (bp->flags & USING_MSI_FLAG)
1339                 flags = 0;
1340         else
1341                 flags = IRQF_SHARED;
1342
1343         rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
1344                          bp->dev->name, bp->dev);
1345         return rc;
1346 }
1347
1348 static inline int bnx2x_setup_irqs(struct bnx2x *bp)
1349 {
1350         int rc = 0;
1351         if (bp->flags & USING_MSIX_FLAG) {
1352                 rc = bnx2x_req_msix_irqs(bp);
1353                 if (rc)
1354                         return rc;
1355         } else {
1356                 bnx2x_ack_int(bp);
1357                 rc = bnx2x_req_irq(bp);
1358                 if (rc) {
1359                         BNX2X_ERR("IRQ request failed  rc %d, aborting\n", rc);
1360                         return rc;
1361                 }
1362                 if (bp->flags & USING_MSI_FLAG) {
1363                         bp->dev->irq = bp->pdev->irq;
1364                         netdev_info(bp->dev, "using MSI  IRQ %d\n",
1365                                bp->pdev->irq);
1366                 }
1367         }
1368
1369         return 0;
1370 }
1371
1372 static inline void bnx2x_napi_enable(struct bnx2x *bp)
1373 {
1374         int i;
1375
1376         for_each_rx_queue(bp, i)
1377                 napi_enable(&bnx2x_fp(bp, i, napi));
1378 }
1379
1380 static inline void bnx2x_napi_disable(struct bnx2x *bp)
1381 {
1382         int i;
1383
1384         for_each_rx_queue(bp, i)
1385                 napi_disable(&bnx2x_fp(bp, i, napi));
1386 }
1387
1388 void bnx2x_netif_start(struct bnx2x *bp)
1389 {
1390         if (netif_running(bp->dev)) {
1391                 bnx2x_napi_enable(bp);
1392                 bnx2x_int_enable(bp);
1393                 if (bp->state == BNX2X_STATE_OPEN)
1394                         netif_tx_wake_all_queues(bp->dev);
1395         }
1396 }
1397
1398 void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1399 {
1400         bnx2x_int_disable_sync(bp, disable_hw);
1401         bnx2x_napi_disable(bp);
1402 }
1403
1404 u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
1405 {
1406         struct bnx2x *bp = netdev_priv(dev);
1407 #ifdef BCM_CNIC
1408         if (NO_FCOE(bp))
1409                 return skb_tx_hash(dev, skb);
1410         else {
1411                 struct ethhdr *hdr = (struct ethhdr *)skb->data;
1412                 u16 ether_type = ntohs(hdr->h_proto);
1413
1414                 /* Skip VLAN tag if present */
1415                 if (ether_type == ETH_P_8021Q) {
1416                         struct vlan_ethhdr *vhdr =
1417                                 (struct vlan_ethhdr *)skb->data;
1418
1419                         ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1420                 }
1421
1422                 /* If ethertype is FCoE or FIP - use FCoE ring */
1423                 if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
1424                         return bnx2x_fcoe_tx(bp, txq_index);
1425         }
1426 #endif
1427         /* Select a non-FCoE queue: if FCoE is enabled, exclude the FCoE L2 ring
1428          */
1429         return __skb_tx_hash(dev, skb, BNX2X_NUM_ETH_QUEUES(bp));
1430 }
1431
1432 void bnx2x_set_num_queues(struct bnx2x *bp)
1433 {
1434         switch (bp->multi_mode) {
1435         case ETH_RSS_MODE_DISABLED:
1436                 bp->num_queues = 1;
1437                 break;
1438         case ETH_RSS_MODE_REGULAR:
1439                 bp->num_queues = bnx2x_calc_num_queues(bp);
1440                 break;
1441
1442         default:
1443                 bp->num_queues = 1;
1444                 break;
1445         }
1446
1447         /* Add special queues */
1448         bp->num_queues += NON_ETH_CONTEXT_USE;
1449 }
1450
1451 static inline int bnx2x_set_real_num_queues(struct bnx2x *bp)
1452 {
1453         int rc, tx, rx;
1454
1455         tx = MAX_TXQS_PER_COS * bp->max_cos;
1456         rx = BNX2X_NUM_ETH_QUEUES(bp);
1457
1458 /* account for fcoe queue */
1459 #ifdef BCM_CNIC
1460         if (!NO_FCOE(bp)) {
1461                 rx += FCOE_PRESENT;
1462                 tx += FCOE_PRESENT;
1463         }
1464 #endif
1465
1466         rc = netif_set_real_num_tx_queues(bp->dev, tx);
1467         if (rc) {
1468                 BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
1469                 return rc;
1470         }
1471         rc = netif_set_real_num_rx_queues(bp->dev, rx);
1472         if (rc) {
1473                 BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
1474                 return rc;
1475         }
1476
1477         DP(NETIF_MSG_DRV, "Setting real num queues to (tx, rx) (%d, %d)\n",
1478                           tx, rx);
1479
1480         return rc;
1481 }
1482
1483 static inline void bnx2x_set_rx_buf_size(struct bnx2x *bp)
1484 {
1485         int i;
1486
1487         for_each_queue(bp, i) {
1488                 struct bnx2x_fastpath *fp = &bp->fp[i];
1489
1490                 /* Always use a mini-jumbo MTU for the FCoE L2 ring */
1491                 if (IS_FCOE_IDX(i))
1492                         /*
1493                          * Although no IP frames are expected to arrive on
1494                          * this ring, we still want to add
1495                          * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
1496                          * overrun attack.
1497                          */
1498                         fp->rx_buf_size =
1499                                 BNX2X_FCOE_MINI_JUMBO_MTU + ETH_OVREHEAD +
1500                                 BNX2X_FW_RX_ALIGN + IP_HEADER_ALIGNMENT_PADDING;
1501                 else
1502                         fp->rx_buf_size =
1503                                 bp->dev->mtu + ETH_OVREHEAD +
1504                                 BNX2X_FW_RX_ALIGN + IP_HEADER_ALIGNMENT_PADDING;
1505         }
1506 }
1507
1508 static inline int bnx2x_init_rss_pf(struct bnx2x *bp)
1509 {
1510         int i;
1511         u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0};
1512         u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
1513
1514         /*
1515          * Prepare the initial contents of the indirection table if RSS is
1516          * enabled
1517          */
1518         if (bp->multi_mode != ETH_RSS_MODE_DISABLED) {
1519                 for (i = 0; i < sizeof(ind_table); i++)
1520                         ind_table[i] =
1521                                 bp->fp->cl_id + (i % num_eth_queues);
1522         }
1523
1524         /*
1525          * For 57710 and 57711 the SEARCHER configuration (rss_keys) is
1526          * per-port, so if explicit configuration is needed, do it only
1527          * for a PMF.
1528          *
1529          * For 57712 and newer, on the other hand, it's a per-function
1530          * configuration.
1531          */
1532         return bnx2x_config_rss_pf(bp, ind_table,
1533                                    bp->port.pmf || !CHIP_IS_E1x(bp));
1534 }
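
/*
 * Illustrative sketch, not part of the original driver: with four ETH queues
 * and (hypothetically) a base client id bp->fp->cl_id == 16, the loop above
 * fills the T_ETH_INDIRECTION_TABLE_SIZE entries as
 *
 *	ind_table[] = { 16, 17, 18, 19, 16, 17, 18, 19, ... };
 *
 * i.e. the RSS hash buckets are spread round-robin over the ETH client ids.
 * Only the round-robin pattern follows from the code above; the concrete
 * cl_id value is made up.
 */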
1535
1536 int bnx2x_config_rss_pf(struct bnx2x *bp, u8 *ind_table, bool config_hash)
1537 {
1538         struct bnx2x_config_rss_params params = {0};
1539         int i;
1540
1541         /* Although RSS is meaningless when there is a single HW queue, we
1542          * still need it enabled in order to have the HW Rx hash generated.
1543          *
1544          * if (!is_eth_multi(bp))
1545          *      bp->multi_mode = ETH_RSS_MODE_DISABLED;
1546          */
1547
1548         params.rss_obj = &bp->rss_conf_obj;
1549
1550         __set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
1551
1552         /* RSS mode */
1553         switch (bp->multi_mode) {
1554         case ETH_RSS_MODE_DISABLED:
1555                 __set_bit(BNX2X_RSS_MODE_DISABLED, &params.rss_flags);
1556                 break;
1557         case ETH_RSS_MODE_REGULAR:
1558                 __set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
1559                 break;
1560         case ETH_RSS_MODE_VLAN_PRI:
1561                 __set_bit(BNX2X_RSS_MODE_VLAN_PRI, &params.rss_flags);
1562                 break;
1563         case ETH_RSS_MODE_E1HOV_PRI:
1564                 __set_bit(BNX2X_RSS_MODE_E1HOV_PRI, &params.rss_flags);
1565                 break;
1566         case ETH_RSS_MODE_IP_DSCP:
1567                 __set_bit(BNX2X_RSS_MODE_IP_DSCP, &params.rss_flags);
1568                 break;
1569         default:
1570                 BNX2X_ERR("Unknown multi_mode: %d\n", bp->multi_mode);
1571                 return -EINVAL;
1572         }
1573
1574         /* If RSS is enabled */
1575         if (bp->multi_mode != ETH_RSS_MODE_DISABLED) {
1576                 /* RSS configuration */
1577                 __set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
1578                 __set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
1579                 __set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
1580                 __set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
1581
1582                 /* Hash bits */
1583                 params.rss_result_mask = MULTI_MASK;
1584
1585                 memcpy(params.ind_table, ind_table, sizeof(params.ind_table));
1586
1587                 if (config_hash) {
1588                         /* RSS keys */
1589                         for (i = 0; i < sizeof(params.rss_key) / 4; i++)
1590                                 params.rss_key[i] = random32();
1591
1592                         __set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
1593                 }
1594         }
1595
1596         return bnx2x_config_rss(bp, &params);
1597 }
1598
1599 static inline int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
1600 {
1601         struct bnx2x_func_state_params func_params = {0};
1602
1603         /* Prepare parameters for function state transitions */
1604         __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
1605
1606         func_params.f_obj = &bp->func_obj;
1607         func_params.cmd = BNX2X_F_CMD_HW_INIT;
1608
1609         func_params.params.hw_init.load_phase = load_code;
1610
1611         return bnx2x_func_state_change(bp, &func_params);
1612 }
1613
1614 /*
1615  * Cleans the objects that have internal lists without sending
1616  * ramrods. Should be run when interrupts are disabled.
1617  */
1618 static void bnx2x_squeeze_objects(struct bnx2x *bp)
1619 {
1620         int rc;
1621         unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
1622         struct bnx2x_mcast_ramrod_params rparam = {0};
1623         struct bnx2x_vlan_mac_obj *mac_obj = &bp->fp->mac_obj;
1624
1625         /***************** Cleanup MACs' object first *************************/
1626
1627         /* Wait for completion of the requests */
1628         __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
1629         /* Perform a dry cleanup */
1630         __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
1631
1632         /* Clean ETH primary MAC */
1633         __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
1634         rc = mac_obj->delete_all(bp, &bp->fp->mac_obj, &vlan_mac_flags,
1635                                  &ramrod_flags);
1636         if (rc != 0)
1637                 BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
1638
1639         /* Cleanup UC list */
1640         vlan_mac_flags = 0;
1641         __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
1642         rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
1643                                  &ramrod_flags);
1644         if (rc != 0)
1645                 BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
1646
1647         /***************** Now clean mcast object *****************************/
1648         rparam.mcast_obj = &bp->mcast_obj;
1649         __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
1650
1651         /* Add a DEL command... */
1652         rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
1653         if (rc < 0)
1654                 BNX2X_ERR("Failed to add a new DEL command to a multi-cast "
1655                           "object: %d\n", rc);
1656
1657         /* ...and wait until all pending commands are cleared */
1658         rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
1659         while (rc != 0) {
1660                 if (rc < 0) {
1661                         BNX2X_ERR("Failed to clean multi-cast object: %d\n",
1662                                   rc);
1663                         return;
1664                 }
1665
1666                 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
1667         }
1668 }
1669
1670 #ifndef BNX2X_STOP_ON_ERROR
1671 #define LOAD_ERROR_EXIT(bp, label) \
1672         do { \
1673                 (bp)->state = BNX2X_STATE_ERROR; \
1674                 goto label; \
1675         } while (0)
1676 #else
1677 #define LOAD_ERROR_EXIT(bp, label) \
1678         do { \
1679                 (bp)->state = BNX2X_STATE_ERROR; \
1680                 (bp)->panic = 1; \
1681                 return -EBUSY; \
1682         } while (0)
1683 #endif
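
/*
 * Usage sketch, not part of the original driver: inside bnx2x_nic_load()
 * below an error path typically looks like
 *
 *	rc = bnx2x_set_real_num_queues(bp);
 *	if (rc)
 *		LOAD_ERROR_EXIT(bp, load_error0);
 *
 * With BNX2X_STOP_ON_ERROR defined the macro sets the panic flag and returns
 * -EBUSY instead of jumping, which is why the load_error* labels are only
 * compiled in for the non-debug build.
 */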
1684
1685 /* must be called with rtnl_lock */
1686 int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
1687 {
1688         int port = BP_PORT(bp);
1689         u32 load_code;
1690         int i, rc;
1691
1692 #ifdef BNX2X_STOP_ON_ERROR
1693         if (unlikely(bp->panic))
1694                 return -EPERM;
1695 #endif
1696
1697         bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
1698
1699         /* Set the initial link reported state to link down */
1700         bnx2x_acquire_phy_lock(bp);
1701         memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
1702         __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1703                 &bp->last_reported_link.link_report_flags);
1704         bnx2x_release_phy_lock(bp);
1705
1706         /* must be called before memory allocation and HW init */
1707         bnx2x_ilt_set_info(bp);
1708
1709         /*
1710          * Zero fastpath structures while preserving invariants such as the
1711          * napi struct (which is allocated only once), the fp index, max_cos
1712          * and the bp pointer. Also set fp->disable_tpa.
1713          */
1714         for_each_queue(bp, i)
1715                 bnx2x_bz_fp(bp, i);
1716
1717
1718         /* Set the receive queues buffer size */
1719         bnx2x_set_rx_buf_size(bp);
1720
1721         if (bnx2x_alloc_mem(bp))
1722                 return -ENOMEM;
1723
1724         /* Since bnx2x_alloc_mem() may possibly update
1725          * bp->num_queues, bnx2x_set_real_num_queues() should always
1726          * come after it.
1727          */
1728         rc = bnx2x_set_real_num_queues(bp);
1729         if (rc) {
1730                 BNX2X_ERR("Unable to set real_num_queues\n");
1731                 LOAD_ERROR_EXIT(bp, load_error0);
1732         }
1733
1734         /* Configure multi-CoS mappings in the kernel.
1735          * This configuration may be overridden by a multi-class queue
1736          * discipline or by a DCBX negotiation result.
1737          */
1738         bnx2x_setup_tc(bp->dev, bp->max_cos);
1739
1740         bnx2x_napi_enable(bp);
1741
1742         /* Send the LOAD_REQUEST command to the MCP.
1743          * The response returns the type of LOAD command: if this is the
1744          * first port to be initialized, the common blocks should be
1745          * initialized as well; otherwise they should not.
1746          */
1747         if (!BP_NOMCP(bp)) {
1748                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0);
1749                 if (!load_code) {
1750                         BNX2X_ERR("MCP response failure, aborting\n");
1751                         rc = -EBUSY;
1752                         LOAD_ERROR_EXIT(bp, load_error1);
1753                 }
1754                 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
1755                         rc = -EBUSY; /* other port in diagnostic mode */
1756                         LOAD_ERROR_EXIT(bp, load_error1);
1757                 }
1758
1759         } else {
1760                 int path = BP_PATH(bp);
1761
1762                 DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d]      %d, %d, %d\n",
1763                    path, load_count[path][0], load_count[path][1],
1764                    load_count[path][2]);
1765                 load_count[path][0]++;
1766                 load_count[path][1 + port]++;
1767                 DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d]  %d, %d, %d\n",
1768                    path, load_count[path][0], load_count[path][1],
1769                    load_count[path][2]);
1770                 if (load_count[path][0] == 1)
1771                         load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
1772                 else if (load_count[path][1 + port] == 1)
1773                         load_code = FW_MSG_CODE_DRV_LOAD_PORT;
1774                 else
1775                         load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
1776         }
1777
1778         if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
1779             (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
1780             (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
1781                 bp->port.pmf = 1;
1782                 /*
1783                  * We need the barrier to ensure the ordering between the
1784                  * writing to bp->port.pmf here and reading it from the
1785                  * bnx2x_periodic_task().
1786                  */
1787                 smp_mb();
1788                 queue_delayed_work(bnx2x_wq, &bp->period_task, 0);
1789         } else
1790                 bp->port.pmf = 0;
1791
1792         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
1793
1794         /* Init Function state controlling object */
1795         bnx2x__init_func_obj(bp);
1796
1797         /* Initialize HW */
1798         rc = bnx2x_init_hw(bp, load_code);
1799         if (rc) {
1800                 BNX2X_ERR("HW init failed, aborting\n");
1801                 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
1802                 LOAD_ERROR_EXIT(bp, load_error2);
1803         }
1804
1805         /* Connect to IRQs */
1806         rc = bnx2x_setup_irqs(bp);
1807         if (rc) {
1808                 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
1809                 LOAD_ERROR_EXIT(bp, load_error2);
1810         }
1811
1812         /* Setup NIC internals and enable interrupts */
1813         bnx2x_nic_init(bp, load_code);
1814
1815         /* Init per-function objects */
1816         bnx2x_init_bp_objs(bp);
1817
1818         if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
1819             (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
1820             (bp->common.shmem2_base)) {
1821                 if (SHMEM2_HAS(bp, dcc_support))
1822                         SHMEM2_WR(bp, dcc_support,
1823                                   (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
1824                                    SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
1825         }
1826
1827         bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
1828         rc = bnx2x_func_start(bp);
1829         if (rc) {
1830                 BNX2X_ERR("Function start failed!\n");
1831                 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
1832                 LOAD_ERROR_EXIT(bp, load_error3);
1833         }
1834
1835         /* Send LOAD_DONE command to MCP */
1836         if (!BP_NOMCP(bp)) {
1837                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
1838                 if (!load_code) {
1839                         BNX2X_ERR("MCP response failure, aborting\n");
1840                         rc = -EBUSY;
1841                         LOAD_ERROR_EXIT(bp, load_error3);
1842                 }
1843         }
1844
1845         rc = bnx2x_setup_leading(bp);
1846         if (rc) {
1847                 BNX2X_ERR("Setup leading failed!\n");
1848                 LOAD_ERROR_EXIT(bp, load_error3);
1849         }
1850
1851 #ifdef BCM_CNIC
1852         /* Enable Timer scan */
1853         REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
1854 #endif
1855
1856         for_each_nondefault_queue(bp, i) {
1857                 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
1858                 if (rc)
1859                         LOAD_ERROR_EXIT(bp, load_error4);
1860         }
1861
1862         rc = bnx2x_init_rss_pf(bp);
1863         if (rc)
1864                 LOAD_ERROR_EXIT(bp, load_error4);
1865
1866         /* Now that the clients are configured we are ready to work */
1867         bp->state = BNX2X_STATE_OPEN;
1868
1869         /* Configure a ucast MAC */
1870         rc = bnx2x_set_eth_mac(bp, true);
1871         if (rc)
1872                 LOAD_ERROR_EXIT(bp, load_error4);
1873
1874         if (bp->pending_max) {
1875                 bnx2x_update_max_mf_config(bp, bp->pending_max);
1876                 bp->pending_max = 0;
1877         }
1878
1879         if (bp->port.pmf)
1880                 bnx2x_initial_phy_init(bp, load_mode);
1881
1882         /* Start fast path */
1883
1884         /* Initialize Rx filter. */
1885         netif_addr_lock_bh(bp->dev);
1886         bnx2x_set_rx_mode(bp->dev);
1887         netif_addr_unlock_bh(bp->dev);
1888
1889         /* Start the Tx */
1890         switch (load_mode) {
1891         case LOAD_NORMAL:
1892                 /* Tx queues should only be re-enabled */
1893                 netif_tx_wake_all_queues(bp->dev);
1894                 break;
1895
1896         case LOAD_OPEN:
1897                 netif_tx_start_all_queues(bp->dev);
1898                 smp_mb__after_clear_bit();
1899                 break;
1900
1901         case LOAD_DIAG:
1902                 bp->state = BNX2X_STATE_DIAG;
1903                 break;
1904
1905         default:
1906                 break;
1907         }
1908
1909         if (!bp->port.pmf)
1910                 bnx2x__link_status_update(bp);
1911
1912         /* start the timer */
1913         mod_timer(&bp->timer, jiffies + bp->current_interval);
1914
1915 #ifdef BCM_CNIC
1916         bnx2x_setup_cnic_irq_info(bp);
1917         if (bp->state == BNX2X_STATE_OPEN)
1918                 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
1919 #endif
1920         bnx2x_inc_load_cnt(bp);
1921
1922         /* Wait for all pending SP commands to complete */
1923         if (!bnx2x_wait_sp_comp(bp, ~0x0UL)) {
1924                 BNX2X_ERR("Timeout waiting for SP elements to complete\n");
1925                 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
1926                 return -EBUSY;
1927         }
1928
1929         bnx2x_dcbx_init(bp);
1930         return 0;
1931
1932 #ifndef BNX2X_STOP_ON_ERROR
1933 load_error4:
1934 #ifdef BCM_CNIC
1935         /* Disable Timer scan */
1936         REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
1937 #endif
1938 load_error3:
1939         bnx2x_int_disable_sync(bp, 1);
1940
1941         /* Clean queueable objects */
1942         bnx2x_squeeze_objects(bp);
1943
1944         /* Free SKBs, SGEs, TPA pool and driver internals */
1945         bnx2x_free_skbs(bp);
1946         for_each_rx_queue(bp, i)
1947                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
1948
1949         /* Release IRQs */
1950         bnx2x_free_irq(bp);
1951 load_error2:
1952         if (!BP_NOMCP(bp)) {
1953                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
1954                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
1955         }
1956
1957         bp->port.pmf = 0;
1958 load_error1:
1959         bnx2x_napi_disable(bp);
1960 load_error0:
1961         bnx2x_free_mem(bp);
1962
1963         return rc;
1964 #endif /* ! BNX2X_STOP_ON_ERROR */
1965 }
1966
1967 /* must be called with rtnl_lock */
1968 int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
1969 {
1970         int i;
1971         bool global = false;
1972
1973         if ((bp->state == BNX2X_STATE_CLOSED) ||
1974             (bp->state == BNX2X_STATE_ERROR)) {
1975                 /* We can get here if the driver has been unloaded
1976                  * during parity error recovery and is either waiting for a
1977                  * leader to complete or for other functions to unload, and
1978                  * an ifdown has then been issued. In this case we want to
1979                  * unload and let the other functions complete the recovery
1980                  * process.
1981                  */
1982                 bp->recovery_state = BNX2X_RECOVERY_DONE;
1983                 bp->is_leader = 0;
1984                 bnx2x_release_leader_lock(bp);
1985                 smp_mb();
1986
1987                 DP(NETIF_MSG_HW, "Releasing a leadership...\n");
1988
1989                 return -EINVAL;
1990         }
1991
1992         /*
1993          * It's important to set bp->state to a value different from
1994          * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
1995          * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
1996          */
1997         bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
1998         smp_mb();
1999
2000         /* Stop Tx */
2001         bnx2x_tx_disable(bp);
2002
2003 #ifdef BCM_CNIC
2004         bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
2005 #endif
2006
2007         bp->rx_mode = BNX2X_RX_MODE_NONE;
2008
2009         del_timer_sync(&bp->timer);
2010
2011         /* Set ALWAYS_ALIVE bit in shmem */
2012         bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
2013
2014         bnx2x_drv_pulse(bp);
2015
2016         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2017
2018         /* Cleanup the chip if needed */
2019         if (unload_mode != UNLOAD_RECOVERY)
2020                 bnx2x_chip_cleanup(bp, unload_mode);
2021         else {
2022                 /* Send the UNLOAD_REQUEST to the MCP */
2023                 bnx2x_send_unload_req(bp, unload_mode);
2024
2025                 /*
2026                  * Prevent transactions to the host from the functions on
2027                  * the engine that doesn't reset global blocks in case of a
2028                  * global attention, once the global blocks are reset and
2029                  * the gates are opened (the engine whose leader will
2030                  * perform the recovery last).
2031                  */
2032                 if (!CHIP_IS_E1x(bp))
2033                         bnx2x_pf_disable(bp);
2034
2035                 /* Disable HW interrupts, NAPI */
2036                 bnx2x_netif_stop(bp, 1);
2037
2038                 /* Release IRQs */
2039                 bnx2x_free_irq(bp);
2040
2041                 /* Report UNLOAD_DONE to MCP */
2042                 bnx2x_send_unload_done(bp);
2043         }
2044
2045         /*
2046          * At this stage no more interrupts will arrive, so we may safely clean
2047          * the queueable objects here in case they failed to get cleaned so far.
2048          */
2049         bnx2x_squeeze_objects(bp);
2050
2051         /* There should be no more pending SP commands at this stage */
2052         bp->sp_state = 0;
2053
2054         bp->port.pmf = 0;
2055
2056         /* Free SKBs, SGEs, TPA pool and driver internals */
2057         bnx2x_free_skbs(bp);
2058         for_each_rx_queue(bp, i)
2059                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2060
2061         bnx2x_free_mem(bp);
2062
2063         bp->state = BNX2X_STATE_CLOSED;
2064
2065         /* Check if there are pending parity attentions. If there are, set
2066          * RECOVERY_IN_PROGRESS.
2067          */
2068         if (bnx2x_chk_parity_attn(bp, &global, false)) {
2069                 bnx2x_set_reset_in_progress(bp);
2070
2071                 /* Set RESET_IS_GLOBAL if needed */
2072                 if (global)
2073                         bnx2x_set_reset_global(bp);
2074         }
2075
2076
2077         /* The last driver must disable the "close the gate" functionality if
2078          * there is no parity attention or "process kill" pending.
2079          */
2080         if (!bnx2x_dec_load_cnt(bp) && bnx2x_reset_is_done(bp, BP_PATH(bp)))
2081                 bnx2x_disable_close_the_gate(bp);
2082
2083         return 0;
2084 }
2085
2086 int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
2087 {
2088         u16 pmcsr;
2089
2090         /* If there is no power capability, silently succeed */
2091         if (!bp->pm_cap) {
2092                 DP(NETIF_MSG_HW, "No power capability. Breaking.\n");
2093                 return 0;
2094         }
2095
2096         pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
2097
2098         switch (state) {
2099         case PCI_D0:
2100                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2101                                       ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
2102                                        PCI_PM_CTRL_PME_STATUS));
2103
2104                 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
2105                         /* delay required during transition out of D3hot */
2106                         msleep(20);
2107                 break;
2108
2109         case PCI_D3hot:
2110                 /* If there are other clients above, don't
2111                    shut down the power */
2112                 if (atomic_read(&bp->pdev->enable_cnt) != 1)
2113                         return 0;
2114                 /* Don't shut down the power for emulation and FPGA */
2115                 if (CHIP_REV_IS_SLOW(bp))
2116                         return 0;
2117
2118                 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
2119                 pmcsr |= 3;
2120
2121                 if (bp->wol)
2122                         pmcsr |= PCI_PM_CTRL_PME_ENABLE;
2123
2124                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2125                                       pmcsr);
2126
2127                 /* No more memory access after this point until
2128                  * the device is brought back to D0.
2129                  */
2130                 break;
2131
2132         default:
2133                 return -EINVAL;
2134         }
2135         return 0;
2136 }
2137
2138 /*
2139  * net_device service functions
2140  */
2141 int bnx2x_poll(struct napi_struct *napi, int budget)
2142 {
2143         int work_done = 0;
2144         u8 cos;
2145         struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
2146                                                  napi);
2147         struct bnx2x *bp = fp->bp;
2148
2149         while (1) {
2150 #ifdef BNX2X_STOP_ON_ERROR
2151                 if (unlikely(bp->panic)) {
2152                         napi_complete(napi);
2153                         return 0;
2154                 }
2155 #endif
2156
2157                 for_each_cos_in_tx_queue(fp, cos)
2158                         if (bnx2x_tx_queue_has_work(&fp->txdata[cos]))
2159                                 bnx2x_tx_int(bp, &fp->txdata[cos]);
2160
2161
2162                 if (bnx2x_has_rx_work(fp)) {
2163                         work_done += bnx2x_rx_int(fp, budget - work_done);
2164
2165                         /* must not complete if we consumed full budget */
2166                         if (work_done >= budget)
2167                                 break;
2168                 }
2169
2170                 /* Fall out from the NAPI loop if needed */
2171                 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
2172 #ifdef BCM_CNIC
2173                         /* No need to update SB for FCoE L2 ring as long as
2174                          * it's connected to the default SB and the SB
2175                          * has been updated when NAPI was scheduled.
2176                          */
2177                         if (IS_FCOE_FP(fp)) {
2178                                 napi_complete(napi);
2179                                 break;
2180                         }
2181 #endif
2182
2183                         bnx2x_update_fpsb_idx(fp);
2184                         /* bnx2x_has_rx_work() reads the status block,
2185                          * thus we need to ensure that the status block indices
2186                          * have actually been read (bnx2x_update_fpsb_idx)
2187                          * prior to this check (bnx2x_has_rx_work), so that
2188                          * we won't write a "newer" value of the status block
2189                          * to the IGU (if there was a DMA right after
2190                          * bnx2x_has_rx_work and there was no rmb, the memory
2191                          * read (bnx2x_update_fpsb_idx) could be postponed
2192                          * to right before bnx2x_ack_sb). In that case there
2193                          * would never be another interrupt until there is
2194                          * another update of the status block, while there
2195                          * is still unhandled work.
2196                          */
2197                         rmb();
2198
2199                         if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
2200                                 napi_complete(napi);
2201                                 /* Re-enable interrupts */
2202                                 DP(NETIF_MSG_HW,
2203                                    "Update index to %d\n", fp->fp_hc_idx);
2204                                 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
2205                                              le16_to_cpu(fp->fp_hc_idx),
2206                                              IGU_INT_ENABLE, 1);
2207                                 break;
2208                         }
2209                 }
2210         }
2211
2212         return work_done;
2213 }
2214
2215 /* We split the first BD into a headers BD and a data BD
2216  * to ease the pain of our fellow microcode engineers;
2217  * we use one mapping for both BDs.
2218  * So far this has only been observed to happen
2219  * in Other Operating Systems(TM)
2220  */
2221 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
2222                                    struct bnx2x_fp_txdata *txdata,
2223                                    struct sw_tx_bd *tx_buf,
2224                                    struct eth_tx_start_bd **tx_bd, u16 hlen,
2225                                    u16 bd_prod, int nbd)
2226 {
2227         struct eth_tx_start_bd *h_tx_bd = *tx_bd;
2228         struct eth_tx_bd *d_tx_bd;
2229         dma_addr_t mapping;
2230         int old_len = le16_to_cpu(h_tx_bd->nbytes);
2231
2232         /* first fix first BD */
2233         h_tx_bd->nbd = cpu_to_le16(nbd);
2234         h_tx_bd->nbytes = cpu_to_le16(hlen);
2235
2236         DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
2237            "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
2238            h_tx_bd->addr_lo, h_tx_bd->nbd);
2239
2240         /* now get a new data BD
2241          * (after the pbd) and fill it */
2242         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2243         d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
2244
2245         mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
2246                            le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
2247
2248         d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2249         d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
2250         d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
2251
2252         /* this marks the BD as one that has no individual mapping */
2253         tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
2254
2255         DP(NETIF_MSG_TX_QUEUED,
2256            "TSO split data size is %d (%x:%x)\n",
2257            d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
2258
2259         /* update tx_bd */
2260         *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
2261
2262         return bd_prod;
2263 }
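
/*
 * Illustrative sketch, not part of the original driver: for a TSO packet
 * whose linear part holds both the headers (hlen bytes) and some payload,
 * the split above turns
 *
 *	[ start BD: nbytes = old_len ]
 * into
 *	[ start BD: nbytes = hlen ] [ data BD: nbytes = old_len - hlen ]
 *
 * with both BDs pointing into the same DMA mapping (the data BD simply
 * starts hlen bytes further in), which is why the sw_tx_bd is flagged with
 * BNX2X_TSO_SPLIT_BD so that the completion path does not try to unmap it
 * separately.
 */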
2264
2265 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
2266 {
2267         if (fix > 0)
2268                 csum = (u16) ~csum_fold(csum_sub(csum,
2269                                 csum_partial(t_header - fix, fix, 0)));
2270
2271         else if (fix < 0)
2272                 csum = (u16) ~csum_fold(csum_add(csum,
2273                                 csum_partial(t_header, -fix, 0)));
2274
2275         return swab16(csum);
2276 }
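
/*
 * Usage sketch, not part of the original driver: the caller passes the
 * checksum computed by the stack together with the signed offset between the
 * checksum start and the transport header, e.g. (as bnx2x_set_pbd_csum()
 * does below for the UDP HW-bug workaround)
 *
 *	pbd->tcp_pseudo_csum =
 *		bnx2x_csum_fix(skb_transport_header(skb),
 *			       SKB_CS(skb), SKB_CS_OFF(skb));
 *
 * For fix > 0 the partial sum of the fix bytes preceding t_header is
 * subtracted, for fix < 0 the partial sum of the -fix bytes starting at
 * t_header is added, and the folded result is byte-swapped for the parse BD.
 */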
2277
2278 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
2279 {
2280         u32 rc;
2281
2282         if (skb->ip_summed != CHECKSUM_PARTIAL)
2283                 rc = XMIT_PLAIN;
2284
2285         else {
2286                 if (vlan_get_protocol(skb) == htons(ETH_P_IPV6)) {
2287                         rc = XMIT_CSUM_V6;
2288                         if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
2289                                 rc |= XMIT_CSUM_TCP;
2290
2291                 } else {
2292                         rc = XMIT_CSUM_V4;
2293                         if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2294                                 rc |= XMIT_CSUM_TCP;
2295                 }
2296         }
2297
2298         if (skb_is_gso_v6(skb))
2299                 rc |= XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6;
2300         else if (skb_is_gso(skb))
2301                 rc |= XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP;
2302
2303         return rc;
2304 }
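
/*
 * Illustrative sketch, not part of the original driver: for a
 * CHECKSUM_PARTIAL IPv4 TCP skb with GSO enabled the function above returns
 *
 *	XMIT_CSUM_V4 | XMIT_CSUM_TCP | XMIT_GSO_V4
 *
 * while a skb with no checksum offload requested simply gets XMIT_PLAIN.
 * These flags select the checksum/GSO handling paths in bnx2x_start_xmit().
 */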
2305
2306 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
2307 /* Check if the packet requires linearization (the packet is too fragmented).
2308    No need to check fragmentation if the page size > 8K (there will be no
2309    violation of FW restrictions) */
2310 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
2311                              u32 xmit_type)
2312 {
2313         int to_copy = 0;
2314         int hlen = 0;
2315         int first_bd_sz = 0;
2316
2317         /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
2318         if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
2319
2320                 if (xmit_type & XMIT_GSO) {
2321                         unsigned short lso_mss = skb_shinfo(skb)->gso_size;
2322                         /* Check if LSO packet needs to be copied:
2323                            3 = 1 (for headers BD) + 2 (for PBD and last BD) */
2324                         int wnd_size = MAX_FETCH_BD - 3;
2325                         /* Number of windows to check */
2326                         int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
2327                         int wnd_idx = 0;
2328                         int frag_idx = 0;
2329                         u32 wnd_sum = 0;
2330
2331                         /* Headers length */
2332                         hlen = (int)(skb_transport_header(skb) - skb->data) +
2333                                 tcp_hdrlen(skb);
2334
2335                         /* Amount of data (w/o headers) on the linear part of the SKB */
2336                         first_bd_sz = skb_headlen(skb) - hlen;
2337
2338                         wnd_sum  = first_bd_sz;
2339
2340                         /* Calculate the first sum - it's special */
2341                         for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
2342                                 wnd_sum +=
2343                                         skb_shinfo(skb)->frags[frag_idx].size;
2344
2345                         /* If there was data on linear skb data - check it */
2346                         if (first_bd_sz > 0) {
2347                                 if (unlikely(wnd_sum < lso_mss)) {
2348                                         to_copy = 1;
2349                                         goto exit_lbl;
2350                                 }
2351
2352                                 wnd_sum -= first_bd_sz;
2353                         }
2354
2355                         /* Others are easier: run through the frag list and
2356                            check all windows */
2357                         for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
2358                                 wnd_sum +=
2359                           skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
2360
2361                                 if (unlikely(wnd_sum < lso_mss)) {
2362                                         to_copy = 1;
2363                                         break;
2364                                 }
2365                                 wnd_sum -=
2366                                         skb_shinfo(skb)->frags[wnd_idx].size;
2367                         }
2368                 } else {
2369                         /* in the non-LSO case a too-fragmented packet should
2370                            always be linearized */
2371                         to_copy = 1;
2372                 }
2373         }
2374
2375 exit_lbl:
2376         if (unlikely(to_copy))
2377                 DP(NETIF_MSG_TX_QUEUED,
2378                    "Linearization IS REQUIRED for %s packet. "
2379                    "num_frags %d  hlen %d  first_bd_sz %d\n",
2380                    (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
2381                    skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
2382
2383         return to_copy;
2384 }
2385 #endif
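
/*
 * Illustrative sketch, not part of the original driver: the sliding-window
 * check above requires that every window of (MAX_FETCH_BD - 3) consecutive
 * BDs carries at least lso_mss bytes; otherwise the skb is linearized.
 * Purely for illustration, if MAX_FETCH_BD - 3 were 10, lso_mss were 1400
 * and the skb had 13 frags of 100 bytes each after the headers, the first
 * window (first_bd_sz + 9 frags) would sum to well under 1400, so
 * wnd_sum < lso_mss would hold and the packet would be copied/linearized.
 * The numbers are made up; only the window logic comes from the function
 * above.
 */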
2386
2387 static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
2388                                         u32 xmit_type)
2389 {
2390         *parsing_data |= (skb_shinfo(skb)->gso_size <<
2391                               ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
2392                               ETH_TX_PARSE_BD_E2_LSO_MSS;
2393         if ((xmit_type & XMIT_GSO_V6) &&
2394             (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
2395                 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
2396 }
2397
2398 /**
2399  * bnx2x_set_pbd_gso - update PBD in GSO case.
2400  *
2401  * @skb:        packet skb
2402  * @pbd:        parse BD
2403  * @xmit_type:  xmit flags
2404  */
2405 static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
2406                                      struct eth_tx_parse_bd_e1x *pbd,
2407                                      u32 xmit_type)
2408 {
2409         pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2410         pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
2411         pbd->tcp_flags = pbd_tcp_flags(skb);
2412
2413         if (xmit_type & XMIT_GSO_V4) {
2414                 pbd->ip_id = swab16(ip_hdr(skb)->id);
2415                 pbd->tcp_pseudo_csum =
2416                         swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
2417                                                   ip_hdr(skb)->daddr,
2418                                                   0, IPPROTO_TCP, 0));
2419
2420         } else
2421                 pbd->tcp_pseudo_csum =
2422                         swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2423                                                 &ipv6_hdr(skb)->daddr,
2424                                                 0, IPPROTO_TCP, 0));
2425
2426         pbd->global_data |= ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN;
2427 }
2428
2429 /**
2430  * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
2431  *
2432  * @bp:                 driver handle
2433  * @skb:                packet skb
2434  * @parsing_data:       data to be updated
2435  * @xmit_type:          xmit flags
2436  *
2437  * 57712 related
2438  */
2439 static inline  u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
2440         u32 *parsing_data, u32 xmit_type)
2441 {
2442         *parsing_data |=
2443                         ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
2444                         ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) &
2445                         ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W;
2446
2447         if (xmit_type & XMIT_CSUM_TCP) {
2448                 *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
2449                         ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
2450                         ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
2451
2452                 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
2453         } else
2454                 /* We support checksum offload for TCP and UDP only.
2455                  * No need to pass the UDP header length - it's a constant.
2456                  */
2457                 return skb_transport_header(skb) +
2458                                 sizeof(struct udphdr) - skb->data;
2459 }
2460
2461 static inline void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
2462         struct eth_tx_start_bd *tx_start_bd, u32 xmit_type)
2463 {
2464         tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
2465
2466         if (xmit_type & XMIT_CSUM_V4)
2467                 tx_start_bd->bd_flags.as_bitfield |=
2468                                         ETH_TX_BD_FLAGS_IP_CSUM;
2469         else
2470                 tx_start_bd->bd_flags.as_bitfield |=
2471                                         ETH_TX_BD_FLAGS_IPV6;
2472
2473         if (!(xmit_type & XMIT_CSUM_TCP))
2474                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
2475 }
2476
2477 /**
2478  * bnx2x_set_pbd_csum - update PBD with checksum and return header length
2479  *
2480  * @bp:         driver handle
2481  * @skb:        packet skb
2482  * @pbd:        parse BD to be updated
2483  * @xmit_type:  xmit flags
2484  */
2485 static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
2486         struct eth_tx_parse_bd_e1x *pbd,
2487         u32 xmit_type)
2488 {
2489         u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
2490
2491         /* for now NS flag is not used in Linux */
2492         pbd->global_data =
2493                 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
2494                          ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
2495
2496         pbd->ip_hlen_w = (skb_transport_header(skb) -
2497                         skb_network_header(skb)) >> 1;
2498
2499         hlen += pbd->ip_hlen_w;
2500
2501         /* We support checksum offload for TCP and UDP only */
2502         if (xmit_type & XMIT_CSUM_TCP)
2503                 hlen += tcp_hdrlen(skb) / 2;
2504         else
2505                 hlen += sizeof(struct udphdr) / 2;
2506
2507         pbd->total_hlen_w = cpu_to_le16(hlen);
2508         hlen = hlen*2;
2509
2510         if (xmit_type & XMIT_CSUM_TCP) {
2511                 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
2512
2513         } else {
2514                 s8 fix = SKB_CS_OFF(skb); /* signed! */
2515
2516                 DP(NETIF_MSG_TX_QUEUED,
2517                    "hlen %d  fix %d  csum before fix %x\n",
2518                    le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
2519
2520                 /* HW bug: fixup the CSUM */
2521                 pbd->tcp_pseudo_csum =
2522                         bnx2x_csum_fix(skb_transport_header(skb),
2523                                        SKB_CS(skb), fix);
2524
2525                 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
2526                    pbd->tcp_pseudo_csum);
2527         }
2528
2529         return hlen;
2530 }
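
/*
 * Illustrative sketch, not part of the original driver: for an untagged IPv4
 * TCP packet with no IP or TCP options the arithmetic above works out to
 *
 *	hlen (Ethernet hdr)	= 14 / 2	=  7 words
 *	pbd->ip_hlen_w		= 20 / 2	= 10 words
 *	TCP header		= 20 / 2	= 10 words
 *	pbd->total_hlen_w	= 27 words
 *	returned hlen		= 27 * 2	= 54 bytes
 *
 * The header sizes are the standard ones and are assumed only for the sake
 * of the example.
 */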
2531
2532 /* called with netif_tx_lock
2533  * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
2534  * netif_wake_queue()
2535  */
2536 netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
2537 {
2538         struct bnx2x *bp = netdev_priv(dev);
2539
2540         struct bnx2x_fastpath *fp;
2541         struct netdev_queue *txq;
2542         struct bnx2x_fp_txdata *txdata;
2543         struct sw_tx_bd *tx_buf;
2544         struct eth_tx_start_bd *tx_start_bd, *first_bd;
2545         struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
2546         struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
2547         struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
2548         u32 pbd_e2_parsing_data = 0;
2549         u16 pkt_prod, bd_prod;
2550         int nbd, txq_index, fp_index, txdata_index;
2551         dma_addr_t mapping;
2552         u32 xmit_type = bnx2x_xmit_type(bp, skb);
2553         int i;
2554         u8 hlen = 0;
2555         __le16 pkt_size = 0;
2556         struct ethhdr *eth;
2557         u8 mac_type = UNICAST_ADDRESS;
2558
2559 #ifdef BNX2X_STOP_ON_ERROR
2560         if (unlikely(bp->panic))
2561                 return NETDEV_TX_BUSY;
2562 #endif
2563
2564         txq_index = skb_get_queue_mapping(skb);
2565         txq = netdev_get_tx_queue(dev, txq_index);
2566
2567         BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + FCOE_PRESENT);
2568
2569         /* decode the fastpath index and the cos index from the txq */
2570         fp_index = TXQ_TO_FP(txq_index);
2571         txdata_index = TXQ_TO_COS(txq_index);
2572
2573 #ifdef BCM_CNIC
2574         /*
2575          * Override the above for the FCoE queue:
2576          *   - FCoE fp entry is right after the ETH entries.
2577          *   - FCoE L2 queue uses bp->txdata[0] only.
2578          */
2579         if (unlikely(!NO_FCOE(bp) && (txq_index ==
2580                                       bnx2x_fcoe_tx(bp, txq_index)))) {
2581                 fp_index = FCOE_IDX;
2582                 txdata_index = 0;
2583         }
2584 #endif
2585
2586         /* enable this debug print to view the transmission queue being used
2587         DP(BNX2X_MSG_FP, "indices: txq %d, fp %d, txdata %d",
2588            txq_index, fp_index, txdata_index); */
2589
2590         /* locate the fastpath and the txdata */
2591         fp = &bp->fp[fp_index];
2592         txdata = &fp->txdata[txdata_index];
2593
2594         /* enable this debug print to view the transmission details
2595         DP(BNX2X_MSG_FP,"transmitting packet cid %d fp index %d txdata_index %d"
2596                         " tx_data ptr %p fp pointer %p",
2597            txdata->cid, fp_index, txdata_index, txdata, fp); */
2598
2599         if (unlikely(bnx2x_tx_avail(bp, txdata) <
2600                      (skb_shinfo(skb)->nr_frags + 3))) {
2601                 fp->eth_q_stats.driver_xoff++;
2602                 netif_tx_stop_queue(txq);
2603                 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
2604                 return NETDEV_TX_BUSY;
2605         }
2606
2607         DP(NETIF_MSG_TX_QUEUED, "queue[%d]: SKB: summed %x  protocol %x  "
2608                                 "protocol(%x,%x) gso type %x  xmit_type %x\n",
2609            txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
2610            ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
2611
2612         eth = (struct ethhdr *)skb->data;
2613
2614         /* set flag according to packet type (UNICAST_ADDRESS is default) */
2615         if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
2616                 if (is_broadcast_ether_addr(eth->h_dest))
2617                         mac_type = BROADCAST_ADDRESS;
2618                 else
2619                         mac_type = MULTICAST_ADDRESS;
2620         }
2621
2622 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
2623         /* First, check if we need to linearize the skb (due to FW
2624            restrictions). No need to check fragmentation if the page size > 8K
2625            (there will be no violation of FW restrictions) */
2626         if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
2627                 /* Statistics of linearization */
2628                 bp->lin_cnt++;
2629                 if (skb_linearize(skb) != 0) {
2630                         DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
2631                            "silently dropping this SKB\n");
2632                         dev_kfree_skb_any(skb);
2633                         return NETDEV_TX_OK;
2634                 }
2635         }
2636 #endif
2637         /* Map skb linear data for DMA */
2638         mapping = dma_map_single(&bp->pdev->dev, skb->data,
2639                                  skb_headlen(skb), DMA_TO_DEVICE);
2640         if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
2641                 DP(NETIF_MSG_TX_QUEUED, "SKB mapping failed - "
2642                    "silently dropping this SKB\n");
2643                 dev_kfree_skb_any(skb);
2644                 return NETDEV_TX_OK;
2645         }
2646         /*
2647         Please read carefully. First we use one BD which we mark as the start,
2648         then we have a parsing info BD (used for TSO or xsum),
2649         and only then we have the rest of the TSO BDs.
2650         (Don't forget to mark the last one as last,
2651         and to unmap only AFTER you write to the BD ...)
2652         And above all, all PBD sizes are in words - NOT DWORDS!
2653         */
2654
2655         /* get current pkt produced now - advance it just before sending packet
2656          * since mapping of pages may fail and cause packet to be dropped
2657          */
2658         pkt_prod = txdata->tx_pkt_prod;
2659         bd_prod = TX_BD(txdata->tx_bd_prod);
2660
2661         /* get a tx_buf and first BD
2662          * tx_start_bd may be changed during SPLIT,
2663          * but first_bd will always stay first
2664          */
2665         tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
2666         tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
2667         first_bd = tx_start_bd;
2668
2669         tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
2670         SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_ETH_ADDR_TYPE,
2671                  mac_type);
2672
2673         /* header nbd */
2674         SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1);
2675
2676         /* remember the first BD of the packet */
2677         tx_buf->first_bd = txdata->tx_bd_prod;
2678         tx_buf->skb = skb;
2679         tx_buf->flags = 0;
2680
2681         DP(NETIF_MSG_TX_QUEUED,
2682            "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
2683            pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
2684
2685         if (vlan_tx_tag_present(skb)) {
2686                 tx_start_bd->vlan_or_ethertype =
2687                     cpu_to_le16(vlan_tx_tag_get(skb));
2688                 tx_start_bd->bd_flags.as_bitfield |=
2689                     (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
2690         } else
2691                 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
2692
2693         /* turn on parsing and get a BD */
2694         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2695
2696         if (xmit_type & XMIT_CSUM)
2697                 bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
2698
2699         if (!CHIP_IS_E1x(bp)) {
2700                 pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
2701                 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
2702                 /* Set PBD in checksum offload case */
2703                 if (xmit_type & XMIT_CSUM)
2704                         hlen = bnx2x_set_pbd_csum_e2(bp, skb,
2705                                                      &pbd_e2_parsing_data,
2706                                                      xmit_type);
2707                 if (IS_MF_SI(bp)) {
2708                         /*
2709                          * fill in the MAC addresses in the PBD - for local
2710                          * switching
2711                          */
2712                         bnx2x_set_fw_mac_addr(&pbd_e2->src_mac_addr_hi,
2713                                               &pbd_e2->src_mac_addr_mid,
2714                                               &pbd_e2->src_mac_addr_lo,
2715                                               eth->h_source);
2716                         bnx2x_set_fw_mac_addr(&pbd_e2->dst_mac_addr_hi,
2717                                               &pbd_e2->dst_mac_addr_mid,
2718                                               &pbd_e2->dst_mac_addr_lo,
2719                                               eth->h_dest);
2720                 }
2721         } else {
2722                 pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
2723                 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
2724                 /* Set PBD in checksum offload case */
2725                 if (xmit_type & XMIT_CSUM)
2726                         hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
2727
2728         }
2729
2730         /* Setup the data pointer of the first BD of the packet */
2731         tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2732         tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
2733         nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
2734         tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
2735         pkt_size = tx_start_bd->nbytes;
2736
2737         DP(NETIF_MSG_TX_QUEUED, "first bd @%p  addr (%x:%x)  nbd %d"
2738            "  nbytes %d  flags %x  vlan %x\n",
2739            tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
2740            le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
2741            tx_start_bd->bd_flags.as_bitfield,
2742            le16_to_cpu(tx_start_bd->vlan_or_ethertype));
2743
2744         if (xmit_type & XMIT_GSO) {
2745
2746                 DP(NETIF_MSG_TX_QUEUED,
2747                    "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
2748                    skb->len, hlen, skb_headlen(skb),
2749                    skb_shinfo(skb)->gso_size);
2750
2751                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
2752
2753                 if (unlikely(skb_headlen(skb) > hlen))
2754                         bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
2755                                                  &tx_start_bd, hlen,
2756                                                  bd_prod, ++nbd);
2757                 if (!CHIP_IS_E1x(bp))
2758                         bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
2759                                              xmit_type);
2760                 else
2761                         bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
2762         }
2763
2764         /* Set the PBD's parsing_data field if not zero
2765          * (for the chips newer than 57711).
2766          */
2767         if (pbd_e2_parsing_data)
2768                 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
2769
2770         tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
2771
2772         /* Handle fragmented skb */
2773         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2774                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2775
2776                 mapping = dma_map_page(&bp->pdev->dev, frag->page,
2777                                        frag->page_offset, frag->size,
2778                                        DMA_TO_DEVICE);
2779                 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
2780
2781                         DP(NETIF_MSG_TX_QUEUED, "Unable to map page - "
2782                                                 "dropping packet...\n");
2783
2784                         /* we need to unmap all buffers already mapped
2785                          * for this SKB;
2786                          * first_bd->nbd needs to be properly updated
2787                          * before the call to bnx2x_free_tx_pkt
2788                          */
2789                         first_bd->nbd = cpu_to_le16(nbd);
2790                         bnx2x_free_tx_pkt(bp, txdata,
2791                                           TX_BD(txdata->tx_pkt_prod));
2792                         return NETDEV_TX_OK;
2793                 }
2794
2795                 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2796                 tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
2797                 if (total_pkt_bd == NULL)
2798                         total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
2799
2800                 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2801                 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
2802                 tx_data_bd->nbytes = cpu_to_le16(frag->size);
2803                 le16_add_cpu(&pkt_size, frag->size);
2804                 nbd++;
2805
2806                 DP(NETIF_MSG_TX_QUEUED,
2807                    "frag %d  bd @%p  addr (%x:%x)  nbytes %d\n",
2808                    i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
2809                    le16_to_cpu(tx_data_bd->nbytes));
2810         }
2811
2812         DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
2813
2814         /* update with actual num BDs */
2815         first_bd->nbd = cpu_to_le16(nbd);
2816
2817         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2818
2819         /* now send a tx doorbell, counting the next BD
2820          * if the packet contains or ends with it
2821          */
2822         if (TX_BD_POFF(bd_prod) < nbd)
2823                 nbd++;
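        /* The check above catches a BD chain that wrapped over a BD page
         * boundary: the "next page" link BD was consumed as well, so it has
         * to be included in the doorbell producer update below.
         */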
2824
2825         /* total_pkt_bytes should be set on the first data BD if
2826          * it's not an LSO packet and there is more than one
2827          * data BD. In that case pkt_size is limited by the MTU.
2828          * However, we prefer to also set it for an LSO packet (even
2829          * though we don't have to) in order to save some CPU cycles
2830          * in the non-LSO case, which we care much more about.
2831          */
2832         if (total_pkt_bd != NULL)
2833                 total_pkt_bd->total_pkt_bytes = pkt_size;
2834
2835         if (pbd_e1x)
2836                 DP(NETIF_MSG_TX_QUEUED,
2837                    "PBD (E1X) @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u"
2838                    "  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
2839                    pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
2840                    pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
2841                    pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
2842                     le16_to_cpu(pbd_e1x->total_hlen_w));
2843         if (pbd_e2)
2844                 DP(NETIF_MSG_TX_QUEUED,
2845                    "PBD (E2) @%p  dst %x %x %x src %x %x %x parsing_data %x\n",
2846                    pbd_e2, pbd_e2->dst_mac_addr_hi, pbd_e2->dst_mac_addr_mid,
2847                    pbd_e2->dst_mac_addr_lo, pbd_e2->src_mac_addr_hi,
2848                    pbd_e2->src_mac_addr_mid, pbd_e2->src_mac_addr_lo,
2849                    pbd_e2->parsing_data);
2850         DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);
2851
2852         txdata->tx_pkt_prod++;
2853         /*
2854          * Make sure that the BD data is updated before updating the producer
2855          * since FW might read the BD right after the producer is updated.
2856          * This is only applicable for weak-ordered memory model archs such
2857          * as IA-64. The following barrier is also mandatory since FW
2858          * assumes packets always have BDs.
2859          */
2860         wmb();
2861
2862         txdata->tx_db.data.prod += nbd;
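        /* barrier() is a compiler barrier: it keeps the doorbell data update
         * above from being reordered past the DOORBELL write below.
         */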
2863         barrier();
2864
2865         DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
2866
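        /* mmiowb() orders the doorbell MMIO write ahead of a subsequent lock
         * release, so doorbells issued by different CPUs reach the chip in
         * lock order.
         */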
2867         mmiowb();
2868
2869         txdata->tx_bd_prod += nbd;
2870
2871         if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_SKB_FRAGS + 3)) {
2872                 netif_tx_stop_queue(txq);
2873
2874                 /* The paired memory barrier is in bnx2x_tx_int(); we have
2875                  * to keep the ordering of the set_bit() in
2876                  * netif_tx_stop_queue() and the read of fp->bd_tx_cons */
2877                 smp_mb();
2878
2879                 fp->eth_q_stats.driver_xoff++;
2880                 if (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 3)
2881                         netif_tx_wake_queue(txq);
2882         }
2883         txdata->tx_pkt++;
2884
2885         return NETDEV_TX_OK;
2886 }
2887
2888 /**
2889  * bnx2x_setup_tc - configure the net_device for multiple traffic classes
2890  *
2891  * @dev:        net device to configure
2892  * @num_tc:     number of traffic classes to enable
2893  *
2894  * Callback connected to the ndo_setup_tc function pointer.
2895  */
2896 int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
2897 {
2898         int cos, prio, count, offset;
2899         struct bnx2x *bp = netdev_priv(dev);
2900
2901         /* setup tc must be called under rtnl lock */
2902         ASSERT_RTNL();
2903
2904         /* no traffic classes requested: reset the mapping and return */
2905         if (!num_tc) {
2906                 netdev_reset_tc(dev);
2907                 return 0;
2908         }
2909
2910         /* requested to support too many traffic classes */
2911         if (num_tc > bp->max_cos) {
2912                 DP(NETIF_MSG_TX_ERR, "support for too many traffic classes"
2913                                      " requested: %d. max supported is %d",
2914                                      num_tc, bp->max_cos);
2915                 return -EINVAL;
2916         }
2917
2918         /* declare amount of supported traffic classes */
2919         if (netdev_set_num_tc(dev, num_tc)) {
2920                 DP(NETIF_MSG_TX_ERR, "failed to declare %d traffic classes",
2921                                      num_tc);
2922                 return -EINVAL;
2923         }
2924
2925         /* configure priority to traffic class mapping */
2926         for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
2927                 netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]);
2928                 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d",
2929                    prio, bp->prio_to_cos[prio]);
2930         }
2931
2932
2933         /* Use this configuration to differentiate tc0 from other COSes.
2934            This can be used for ETS or PFC, and save the effort of setting
2935            up a multi-class queueing discipline or negotiating DCBX with a switch
2936         netdev_set_prio_tc_map(dev, 0, 0);
2937         DP(BNX2X_MSG_SP, "mapping priority %d to tc %d", 0, 0);
2938         for (prio = 1; prio < 16; prio++) {
2939                 netdev_set_prio_tc_map(dev, prio, 1);
2940                 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d", prio, 1);
2941         } */
2942
2943         /* configure traffic class to transmission queue mapping */
2944         for (cos = 0; cos < bp->max_cos; cos++) {
2945                 count = BNX2X_NUM_ETH_QUEUES(bp);
2946                 offset = cos * MAX_TXQS_PER_COS;
2947                 netdev_set_tc_queue(dev, cos, count, offset);
2948                 DP(BNX2X_MSG_SP, "mapping tc %d to offset %d count %d",
2949                    cos, offset, count);
2950         }
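        /* Net effect: traffic class 'cos' is backed by the first
         * BNX2X_NUM_ETH_QUEUES(bp) netdev tx queues starting at
         * cos * MAX_TXQS_PER_COS.
         */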
2951
2952         return 0;
2953 }
2954
2955 /* called with rtnl_lock */
2956 int bnx2x_change_mac_addr(struct net_device *dev, void *p)
2957 {
2958         struct sockaddr *addr = p;
2959         struct bnx2x *bp = netdev_priv(dev);
2960         int rc = 0;
2961
2962         if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
2963                 return -EINVAL;
2964
2965         if (netif_running(dev))  {
2966                 rc = bnx2x_set_eth_mac(bp, false);
2967                 if (rc)
2968                         return rc;
2969         }
2970
2971         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2972
2973         if (netif_running(dev))
2974                 rc = bnx2x_set_eth_mac(bp, true);
2975
2976         return rc;
2977 }
2978
2979 static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
2980 {
2981         union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
2982         struct bnx2x_fastpath *fp = &bp->fp[fp_index];
2983         u8 cos;
2984
2985         /* Common */
2986 #ifdef BCM_CNIC
2987         if (IS_FCOE_IDX(fp_index)) {
2988                 memset(sb, 0, sizeof(union host_hc_status_block));
2989                 fp->status_blk_mapping = 0;
2990
2991         } else {
2992 #endif
2993                 /* status blocks */
2994                 if (!CHIP_IS_E1x(bp))
2995                         BNX2X_PCI_FREE(sb->e2_sb,
2996                                        bnx2x_fp(bp, fp_index,
2997                                                 status_blk_mapping),
2998                                        sizeof(struct host_hc_status_block_e2));
2999                 else
3000                         BNX2X_PCI_FREE(sb->e1x_sb,
3001                                        bnx2x_fp(bp, fp_index,
3002                                                 status_blk_mapping),
3003                                        sizeof(struct host_hc_status_block_e1x));
3004 #ifdef BCM_CNIC
3005         }
3006 #endif
3007         /* Rx */
3008         if (!skip_rx_queue(bp, fp_index)) {
3009                 bnx2x_free_rx_bds(fp);
3010
3011                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
3012                 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
3013                 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
3014                                bnx2x_fp(bp, fp_index, rx_desc_mapping),
3015                                sizeof(struct eth_rx_bd) * NUM_RX_BD);
3016
3017                 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
3018                                bnx2x_fp(bp, fp_index, rx_comp_mapping),
3019                                sizeof(struct eth_fast_path_rx_cqe) *
3020                                NUM_RCQ_BD);
3021
3022                 /* SGE ring */
3023                 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
3024                 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
3025                                bnx2x_fp(bp, fp_index, rx_sge_mapping),
3026                                BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
3027         }
3028
3029         /* Tx */
3030         if (!skip_tx_queue(bp, fp_index)) {
3031                 /* fastpath tx rings: tx_buf tx_desc */
3032                 for_each_cos_in_tx_queue(fp, cos) {
3033                         struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
3034
3035                         DP(BNX2X_MSG_SP,
3036                            "freeing tx memory of fp %d cos %d cid %d",
3037                            fp_index, cos, txdata->cid);
3038
3039                         BNX2X_FREE(txdata->tx_buf_ring);
3040                         BNX2X_PCI_FREE(txdata->tx_desc_ring,
3041                                 txdata->tx_desc_mapping,
3042                                 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
3043                 }
3044         }
3045         /* end of fastpath */
3046 }
3047
3048 void bnx2x_free_fp_mem(struct bnx2x *bp)
3049 {
3050         int i;
3051         for_each_queue(bp, i)
3052                 bnx2x_free_fp_mem_at(bp, i);
3053 }
3054
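/* Cache pointers to the index_values and running_index arrays of this
 * fastpath's status block (E2 or E1x layout) in the fastpath structure, so
 * the hot path does not have to re-derive the layout.
 */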
3055 static inline void set_sb_shortcuts(struct bnx2x *bp, int index)
3056 {
3057         union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
3058         if (!CHIP_IS_E1x(bp)) {
3059                 bnx2x_fp(bp, index, sb_index_values) =
3060                         (__le16 *)status_blk.e2_sb->sb.index_values;
3061                 bnx2x_fp(bp, index, sb_running_index) =
3062                         (__le16 *)status_blk.e2_sb->sb.running_index;
3063         } else {
3064                 bnx2x_fp(bp, index, sb_index_values) =
3065                         (__le16 *)status_blk.e1x_sb->sb.index_values;
3066                 bnx2x_fp(bp, index, sb_running_index) =
3067                         (__le16 *)status_blk.e1x_sb->sb.running_index;
3068         }
3069 }
3070
3071 static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
3072 {
3073         union host_hc_status_block *sb;
3074         struct bnx2x_fastpath *fp = &bp->fp[index];
3075         int ring_size = 0;
3076         u8 cos;
3077
3078         /* if rx_ring_size specified - use it */
3079         int rx_ring_size = bp->rx_ring_size ? bp->rx_ring_size :
3080                            MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
3081
3082         /* allocate at least the number of buffers required by FW */
3083         rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
3084                                                     MIN_RX_SIZE_TPA,
3085                                   rx_ring_size);
3086
3087         /* Common */
3088         sb = &bnx2x_fp(bp, index, status_blk);
3089 #ifdef BCM_CNIC
3090         if (!IS_FCOE_IDX(index)) {
3091 #endif
3092                 /* status blocks */
3093                 if (!CHIP_IS_E1x(bp))
3094                         BNX2X_PCI_ALLOC(sb->e2_sb,
3095                                 &bnx2x_fp(bp, index, status_blk_mapping),
3096                                 sizeof(struct host_hc_status_block_e2));
3097                 else
3098                         BNX2X_PCI_ALLOC(sb->e1x_sb,
3099                                 &bnx2x_fp(bp, index, status_blk_mapping),
3100                                 sizeof(struct host_hc_status_block_e1x));
3101 #ifdef BCM_CNIC
3102         }
3103 #endif
3104
3105         /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
3106          * set shortcuts for it.
3107          */
3108         if (!IS_FCOE_IDX(index))
3109                 set_sb_shortcuts(bp, index);
3110
3111         /* Tx */
3112         if (!skip_tx_queue(bp, index)) {
3113                 /* fastpath tx rings: tx_buf tx_desc */
3114                 for_each_cos_in_tx_queue(fp, cos) {
3115                         struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
3116
3117                         DP(BNX2X_MSG_SP, "allocating tx memory of "
3118                                          "fp %d cos %d",
3119                            index, cos);
3120
3121                         BNX2X_ALLOC(txdata->tx_buf_ring,
3122                                 sizeof(struct sw_tx_bd) * NUM_TX_BD);
3123                         BNX2X_PCI_ALLOC(txdata->tx_desc_ring,
3124                                 &txdata->tx_desc_mapping,
3125                                 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
3126                 }
3127         }
3128
3129         /* Rx */
3130         if (!skip_rx_queue(bp, index)) {
3131                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
3132                 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_buf_ring),
3133                                 sizeof(struct sw_rx_bd) * NUM_RX_BD);
3134                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_desc_ring),
3135                                 &bnx2x_fp(bp, index, rx_desc_mapping),
3136                                 sizeof(struct eth_rx_bd) * NUM_RX_BD);
3137
3138                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_comp_ring),
3139                                 &bnx2x_fp(bp, index, rx_comp_mapping),
3140                                 sizeof(struct eth_fast_path_rx_cqe) *
3141                                 NUM_RCQ_BD);
3142
3143                 /* SGE ring */
3144                 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring),
3145                                 sizeof(struct sw_rx_page) * NUM_RX_SGE);
3146                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_sge_ring),
3147                                 &bnx2x_fp(bp, index, rx_sge_mapping),
3148                                 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
3149                 /* RX BD ring */
3150                 bnx2x_set_next_page_rx_bd(fp);
3151
3152                 /* CQ ring */
3153                 bnx2x_set_next_page_rx_cq(fp);
3154
3155                 /* BDs */
3156                 ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
3157                 if (ring_size < rx_ring_size)
3158                         goto alloc_mem_err;
3159         }
3160
3161         return 0;
3162
3163 /* handles low memory cases */
3164 alloc_mem_err:
3165         BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
3166                                                 index, ring_size);
3167         /* FW will drop all packets if the queue is not big enough.
3168          * In that case we disable the queue.
3169          * The minimum size differs for OOO, TPA and non-TPA queues.
3170          */
3171         if (ring_size < (fp->disable_tpa ?
3172                                 MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
3173                         /* release memory allocated for this queue */
3174                         bnx2x_free_fp_mem_at(bp, index);
3175                         return -ENOMEM;
3176         }
3177         return 0;
3178 }
3179
3180 int bnx2x_alloc_fp_mem(struct bnx2x *bp)
3181 {
3182         int i;
3183
3184         /*
3185          * 1. Allocate FP for leading - fatal if error
3186          * 2. {CNIC} Allocate FCoE FP - fatal if error
3187          * 3. {CNIC} Allocate OOO + FWD - disable OOO if error
3188          * 4. Allocate RSS - fix number of queues if error
3189          */
3190
3191         /* leading */
3192         if (bnx2x_alloc_fp_mem_at(bp, 0))
3193                 return -ENOMEM;
3194
3195 #ifdef BCM_CNIC
3196         if (!NO_FCOE(bp))
3197                 /* FCoE */
3198                 if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX))
3199                         /* we will fail the load process instead of
3200                          * marking NO_FCOE_FLAG
3201                          */
3202                         return -ENOMEM;
3203 #endif
3204
3205         /* RSS */
3206         for_each_nondefault_eth_queue(bp, i)
3207                 if (bnx2x_alloc_fp_mem_at(bp, i))
3208                         break;
3209
3210         /* handle memory failures */
3211         if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
3212                 int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
3213
3214                 WARN_ON(delta < 0);
3215 #ifdef BCM_CNIC
3216                 /*
3217                  * Move the non-ETH FPs next to the last ETH FP.
3218                  * This must be done in this order:
3219                  * FCOE_IDX < FWD_IDX < OOO_IDX
3220                  */
3221
3222                 /* move the FCoE fp even if NO_FCOE_FLAG is on */
3223                 bnx2x_move_fp(bp, FCOE_IDX, FCOE_IDX - delta);
3224 #endif
3225                 bp->num_queues -= delta;
3226                 BNX2X_ERR("Adjusted num of queues from %d to %d\n",
3227                           bp->num_queues + delta, bp->num_queues);
3228         }
3229
3230         return 0;
3231 }
3232
3233 void bnx2x_free_mem_bp(struct bnx2x *bp)
3234 {
3235         kfree(bp->fp);
3236         kfree(bp->msix_table);
3237         kfree(bp->ilt);
3238 }
3239
3240 int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
3241 {
3242         struct bnx2x_fastpath *fp;
3243         struct msix_entry *tbl;
3244         struct bnx2x_ilt *ilt;
3245         int msix_table_size = 0;
3246
3247         /*
3248          * The biggest MSI-X table we might need is the maximum number of
3249          * fast path IGU SBs plus the default SB (for the PF).
3250          */
3251         msix_table_size = bp->igu_sb_cnt + 1;
3252
3253         /* fp array: RSS plus CNIC related L2 queues */
3254         fp = kzalloc((BNX2X_MAX_RSS_COUNT(bp) + NON_ETH_CONTEXT_USE) *
3255                      sizeof(*fp), GFP_KERNEL);
3256         if (!fp)
3257                 goto alloc_err;
3258         bp->fp = fp;
3259
3260         /* msix table */
3261         tbl = kzalloc(msix_table_size * sizeof(*tbl), GFP_KERNEL);
3262         if (!tbl)
3263                 goto alloc_err;
3264         bp->msix_table = tbl;
3265
3266         /* ilt */
3267         ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
3268         if (!ilt)
3269                 goto alloc_err;
3270         bp->ilt = ilt;
3271
3272         return 0;
3273 alloc_err:
3274         bnx2x_free_mem_bp(bp);
3275         return -ENOMEM;
3276
3277 }
3278
3279 int bnx2x_reload_if_running(struct net_device *dev)
3280 {
3281         struct bnx2x *bp = netdev_priv(dev);
3282
3283         if (unlikely(!netif_running(dev)))
3284                 return 0;
3285
3286         bnx2x_nic_unload(bp, UNLOAD_NORMAL);
3287         return bnx2x_nic_load(bp, LOAD_NORMAL);
3288 }
3289
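/* Return the index of the PHY currently in use: the internal PHY when only
 * one PHY is configured, otherwise the external PHY selected from the link
 * state or, with the link down, from the HW PHY-selection configuration.
 */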
3290 int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
3291 {
3292         u32 sel_phy_idx = 0;
3293         if (bp->link_params.num_phys <= 1)
3294                 return INT_PHY;
3295
3296         if (bp->link_vars.link_up) {
3297                 sel_phy_idx = EXT_PHY1;
3298                 /* If the link is SERDES, check whether EXT_PHY2 is the active one */
3299                 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
3300                     (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
3301                         sel_phy_idx = EXT_PHY2;
3302         } else {
3303
3304                 switch (bnx2x_phy_selection(&bp->link_params)) {
3305                 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
3306                 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
3307                 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
3308                        sel_phy_idx = EXT_PHY1;
3309                        break;
3310                 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
3311                 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
3312                        sel_phy_idx = EXT_PHY2;
3313                        break;
3314                 }
3315         }
3316
3317         return sel_phy_idx;
3318 }
3319 
3320 int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
3321 {
3322         u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
3323         /*
3324          * The selected activated PHY is always the one after swapping (in
3325          * case PHY swapping is enabled), so when swapping is enabled we
3326          * need to reverse the configuration.
3327          */
3328
3329         if (bp->link_params.multi_phy_config &
3330             PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
3331                 if (sel_phy_idx == EXT_PHY1)
3332                         sel_phy_idx = EXT_PHY2;
3333                 else if (sel_phy_idx == EXT_PHY2)
3334                         sel_phy_idx = EXT_PHY1;
3335         }
3336         return LINK_CONFIG_IDX(sel_phy_idx);
3337 }
3338
3339 #if defined(NETDEV_FCOE_WWNN) && defined(BCM_CNIC)
3340 int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
3341 {
3342         struct bnx2x *bp = netdev_priv(dev);
3343         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
3344
3345         switch (type) {
3346         case NETDEV_FCOE_WWNN:
3347                 *wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
3348                                 cp->fcoe_wwn_node_name_lo);
3349                 break;
3350         case NETDEV_FCOE_WWPN:
3351                 *wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
3352                                 cp->fcoe_wwn_port_name_lo);
3353                 break;
3354         default:
3355                 return -EINVAL;
3356         }
3357
3358         return 0;
3359 }
3360 #endif
3361
3362 /* called with rtnl_lock */
3363 int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
3364 {
3365         struct bnx2x *bp = netdev_priv(dev);
3366
3367         if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
3368                 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
3369                 return -EAGAIN;
3370         }
3371
3372         if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
3373             ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
3374                 return -EINVAL;
3375
3376         /* This does not race with packet allocation
3377          * because the actual alloc size is
3378          * only updated as part of load
3379          */
3380         dev->mtu = new_mtu;
3381
3382         return bnx2x_reload_if_running(dev);
3383 }
3384
3385 u32 bnx2x_fix_features(struct net_device *dev, u32 features)
3386 {
3387         struct bnx2x *bp = netdev_priv(dev);
3388
3389         /* TPA requires Rx CSUM offloading */
3390         if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa)
3391                 features &= ~NETIF_F_LRO;
3392
3393         return features;
3394 }
3395
3396 int bnx2x_set_features(struct net_device *dev, u32 features)
3397 {
3398         struct bnx2x *bp = netdev_priv(dev);
3399         u32 flags = bp->flags;
3400         bool bnx2x_reload = false;
3401
3402         if (features & NETIF_F_LRO)
3403                 flags |= TPA_ENABLE_FLAG;
3404         else
3405                 flags &= ~TPA_ENABLE_FLAG;
3406
3407         if (features & NETIF_F_LOOPBACK) {
3408                 if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
3409                         bp->link_params.loopback_mode = LOOPBACK_BMAC;
3410                         bnx2x_reload = true;
3411                 }
3412         } else {
3413                 if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
3414                         bp->link_params.loopback_mode = LOOPBACK_NONE;
3415                         bnx2x_reload = true;
3416                 }
3417         }
3418
3419         if (flags ^ bp->flags) {
3420                 bp->flags = flags;
3421                 bnx2x_reload = true;
3422         }
3423
3424         if (bnx2x_reload) {
3425                 if (bp->recovery_state == BNX2X_RECOVERY_DONE)
3426                         return bnx2x_reload_if_running(dev);
3427                 /* else: bnx2x_nic_load() will be called at end of recovery */
3428         }
3429
3430         return 0;
3431 }
3432
3433 void bnx2x_tx_timeout(struct net_device *dev)
3434 {
3435         struct bnx2x *bp = netdev_priv(dev);
3436
3437 #ifdef BNX2X_STOP_ON_ERROR
3438         if (!bp->panic)
3439                 bnx2x_panic();
3440 #endif
3441
3442         smp_mb__before_clear_bit();
3443         set_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state);
3444         smp_mb__after_clear_bit();
3445
3446         /* This allows the netif to be shutdown gracefully before resetting */
3447         schedule_delayed_work(&bp->sp_rtnl_task, 0);
3448 }
3449
3450 int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
3451 {
3452         struct net_device *dev = pci_get_drvdata(pdev);
3453         struct bnx2x *bp;
3454
3455         if (!dev) {
3456                 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
3457                 return -ENODEV;
3458         }
3459         bp = netdev_priv(dev);
3460
3461         rtnl_lock();
3462
3463         pci_save_state(pdev);
3464
3465         if (!netif_running(dev)) {
3466                 rtnl_unlock();
3467                 return 0;
3468         }
3469
3470         netif_device_detach(dev);
3471
3472         bnx2x_nic_unload(bp, UNLOAD_CLOSE);
3473
3474         bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
3475
3476         rtnl_unlock();
3477
3478         return 0;
3479 }
3480
3481 int bnx2x_resume(struct pci_dev *pdev)
3482 {
3483         struct net_device *dev = pci_get_drvdata(pdev);
3484         struct bnx2x *bp;
3485         int rc;
3486
3487         if (!dev) {
3488                 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
3489                 return -ENODEV;
3490         }
3491         bp = netdev_priv(dev);
3492
3493         if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
3494                 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
3495                 return -EAGAIN;
3496         }
3497
3498         rtnl_lock();
3499
3500         pci_restore_state(pdev);
3501
3502         if (!netif_running(dev)) {
3503                 rtnl_unlock();
3504                 return 0;
3505         }
3506
3507         bnx2x_set_power_state(bp, PCI_D0);
3508         netif_device_attach(dev);
3509
3510         /* Since the chip was reset, clear the FW sequence number */
3511         bp->fw_seq = 0;
3512         rc = bnx2x_nic_load(bp, LOAD_OPEN);
3513
3514         rtnl_unlock();
3515
3516         return rc;
3517 }
3518
3519
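/* Program the CDU validation values into the ustorm and xstorm sections of an
 * ETH connection context for the given CID.
 */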
3520 void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
3521                               u32 cid)
3522 {
3523         /* ustorm cxt validation */
3524         cxt->ustorm_ag_context.cdu_usage =
3525                 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
3526                         CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
3527         /* xstorm cxt validation */
3528         cxt->xstorm_ag_context.cdu_reserved =
3529                 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
3530                         CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
3531 }
3532
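/* Write the host-coalescing timeout (in ticks) for one status block index
 * into CSTORM internal memory.
 */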
3533 static inline void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
3534                                              u8 fw_sb_id, u8 sb_index,
3535                                              u8 ticks)
3536 {
3537
3538         u32 addr = BAR_CSTRORM_INTMEM +
3539                    CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
3540         REG_WR8(bp, addr, ticks);
3541         DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d ticks %d\n",
3542                           port, fw_sb_id, sb_index, ticks);
3543 }
3544
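/* Set or clear the HC_ENABLED flag of one status block index with a
 * read-modify-write of its flags word in CSTORM internal memory.
 */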
3545 static inline void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
3546                                              u16 fw_sb_id, u8 sb_index,
3547                                              u8 disable)
3548 {
3549         u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
3550         u32 addr = BAR_CSTRORM_INTMEM +
3551                    CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
3552         u16 flags = REG_RD16(bp, addr);
3553         /* clear and set */
3554         flags &= ~HC_INDEX_DATA_HC_ENABLED;
3555         flags |= enable_flag;
3556         REG_WR16(bp, addr, flags);
3557         DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d disable %d\n",
3558                           port, fw_sb_id, sb_index, disable);
3559 }
3560
3561 void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
3562                                     u8 sb_index, u8 disable, u16 usec)
3563 {
3564         int port = BP_PORT(bp);
3565         u8 ticks = usec / BNX2X_BTR;
3566
3567         storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
3568
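        /* Disable coalescing for this index if explicitly requested or if a
         * zero interval was passed.
         */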
3569         disable = disable ? 1 : (usec ? 0 : 1);
3570         storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
3571 }