1 /* bnx2x_cmn.c: Broadcom Everest network driver.
3 * Copyright (c) 2007-2011 Broadcom Corporation
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath and fastpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
18 #include <linux/etherdevice.h>
19 #include <linux/if_vlan.h>
20 #include <linux/interrupt.h>
23 #include <net/ip6_checksum.h>
24 #include <linux/firmware.h>
25 #include <linux/prefetch.h>
26 #include "bnx2x_cmn.h"
28 #include "bnx2x_init.h"
30 static int bnx2x_setup_irqs(struct bnx2x *bp);
33 * bnx2x_bz_fp - zero content of the fastpath structure.
36 * @index: fastpath index to be zeroed
38 * Makes sure the contents of bp->fp[index].napi are kept intact.
41 static inline void bnx2x_bz_fp(struct bnx2x *bp, int index)
43 struct bnx2x_fastpath *fp = &bp->fp[index];
44 struct napi_struct orig_napi = fp->napi;
45 /* bzero bnx2x_fastpath contents */
46 memset(fp, 0, sizeof(*fp));
48 /* Restore the NAPI object as it has been already initialized */
53 * bnx2x_move_fp - move content of the fastpath structure.
56 * @from: source FP index
57 * @to: destination FP index
59 * Makes sure the contents of bp->fp[to].napi are kept intact.
62 static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
64 struct bnx2x_fastpath *from_fp = &bp->fp[from];
65 struct bnx2x_fastpath *to_fp = &bp->fp[to];
66 struct napi_struct orig_napi = to_fp->napi;
67 /* Move bnx2x_fastpath contents */
68 memcpy(to_fp, from_fp, sizeof(*to_fp));
71 /* Restore the NAPI object as it has been already initialized */
72 to_fp->napi = orig_napi;
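/* Note on the two helpers above: fp->napi is saved before the memset()/
 * memcpy() and restored afterwards because, as the comments state, the
 * NAPI object is initialized only once and must survive a fastpath
 * re-initialization.
 */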
75 /* free skb in the packet ring at pos idx
76 * return idx of last bd freed
78 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
81 struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
82 struct eth_tx_start_bd *tx_start_bd;
83 struct eth_tx_bd *tx_data_bd;
84 struct sk_buff *skb = tx_buf->skb;
85 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
88 /* prefetch skb end pointer to speed up dev_kfree_skb() */
91 DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
95 DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
96 tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
97 dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
98 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);
100 nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
101 #ifdef BNX2X_STOP_ON_ERROR
102 if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
103 BNX2X_ERR("BAD nbd!\n");
107 new_cons = nbd + tx_buf->first_bd;
109 /* Get the next bd */
110 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
112 /* Skip a parse bd... */
114 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
116 /* ...and the TSO split header bd since they have no mapping */
117 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
119 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
125 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
126 tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
127 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
128 BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
130 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
135 dev_kfree_skb_any(skb);
136 tx_buf->first_bd = 0;
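/* Summary of the BD bookkeeping above: a transmitted packet occupies a
 * start BD, a parsing BD, an optional TSO-split header BD (flagged with
 * BNX2X_TSO_SPLIT_BD) and one data BD per fragment.  The parsing BD and
 * the split-header BD carry no DMA mapping and are simply skipped, while
 * the start BD and the fragment BDs are unmapped; new_cons is derived
 * from first_bd plus the BD count recorded in the start BD.
 */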
142 int bnx2x_tx_int(struct bnx2x_fastpath *fp)
144 struct bnx2x *bp = fp->bp;
145 struct netdev_queue *txq;
146 u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
148 #ifdef BNX2X_STOP_ON_ERROR
149 if (unlikely(bp->panic))
153 txq = netdev_get_tx_queue(bp->dev, fp->index);
154 hw_cons = le16_to_cpu(*fp->tx_cons_sb);
155 sw_cons = fp->tx_pkt_cons;
157 while (sw_cons != hw_cons) {
160 pkt_cons = TX_BD(sw_cons);
162 DP(NETIF_MSG_TX_DONE, "queue[%d]: hw_cons %u sw_cons %u "
164 fp->index, hw_cons, sw_cons, pkt_cons);
166 bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
170 fp->tx_pkt_cons = sw_cons;
171 fp->tx_bd_cons = bd_cons;
173 /* Need to make the tx_bd_cons update visible to start_xmit()
174 * before checking for netif_tx_queue_stopped(). Without the
175 * memory barrier, there is a small possibility that
176 * start_xmit() will miss it and cause the queue to be stopped
181 if (unlikely(netif_tx_queue_stopped(txq))) {
182 /* Taking tx_lock() is needed to prevent re-enabling the queue
183 * while it's empty. This could have happened if rx_action() gets
184 * suspended in bnx2x_tx_int() after the condition before
185 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
187 * stops the queue->sees fresh tx_bd_cons->releases the queue->
188 * sends some packets consuming the whole queue again->
192 __netif_tx_lock(txq, smp_processor_id());
194 if ((netif_tx_queue_stopped(txq)) &&
195 (bp->state == BNX2X_STATE_OPEN) &&
196 (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
197 netif_tx_wake_queue(txq);
199 __netif_tx_unlock(txq);
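/* Note: the wake threshold of MAX_SKB_FRAGS + 3 used above roughly mirrors
 * the worst-case BD consumption of a single packet in bnx2x_start_xmit() -
 * one BD per fragment plus a start BD, a parsing BD and a possible
 * TSO-split header BD - so the queue is only re-enabled when at least one
 * more maximal packet is guaranteed to fit.
 */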
204 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
207 u16 last_max = fp->last_max_sge;
209 if (SUB_S16(idx, last_max) > 0)
210 fp->last_max_sge = idx;
213 static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
214 struct eth_fast_path_rx_cqe *fp_cqe)
216 struct bnx2x *bp = fp->bp;
217 u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
218 le16_to_cpu(fp_cqe->len_on_bd)) >>
220 u16 last_max, last_elem, first_elem;
227 /* First mark all used pages */
228 for (i = 0; i < sge_len; i++)
229 SGE_MASK_CLEAR_BIT(fp,
230 RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[i])));
232 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
233 sge_len - 1, le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));
235 /* Here we assume that the last SGE index is the biggest */
236 prefetch((void *)(fp->sge_mask));
237 bnx2x_update_last_max_sge(fp,
238 le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));
240 last_max = RX_SGE(fp->last_max_sge);
241 last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
242 first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;
244 /* If ring is not full */
245 if (last_elem + 1 != first_elem)
248 /* Now update the prod */
249 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
250 if (likely(fp->sge_mask[i]))
253 fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
254 delta += RX_SGE_MASK_ELEM_SZ;
258 fp->rx_sge_prod += delta;
259 /* clear page-end entries */
260 bnx2x_clear_sge_mask_next_elems(fp);
263 DP(NETIF_MSG_RX_STATUS,
264 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
265 fp->last_max_sge, fp->rx_sge_prod);
268 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
269 struct sk_buff *skb, u16 cons, u16 prod)
271 struct bnx2x *bp = fp->bp;
272 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
273 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
274 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
277 /* move empty skb from pool to prod and map it */
278 prod_rx_buf->skb = fp->tpa_pool[queue].skb;
279 mapping = dma_map_single(&bp->pdev->dev, fp->tpa_pool[queue].skb->data,
280 fp->rx_buf_size, DMA_FROM_DEVICE);
281 dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
283 /* move partial skb from cons to pool (don't unmap yet) */
284 fp->tpa_pool[queue] = *cons_rx_buf;
286 /* mark bin state as start - print error if current state != stop */
287 if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
288 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
290 fp->tpa_state[queue] = BNX2X_TPA_START;
292 /* point prod_bd to new skb */
293 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
294 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
296 #ifdef BNX2X_STOP_ON_ERROR
297 fp->tpa_queue_used |= (1 << queue);
298 #ifdef _ASM_GENERIC_INT_L64_H
299 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
301 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
307 /* Timestamp option length allowed for TPA aggregation:
309 * nop nop kind length echo val
311 #define TPA_TSTAMP_OPT_LEN 12
313 * bnx2x_set_lro_mss - calculate the approximate value of the MSS
316 * @parsing_flags: parsing flags from the START CQE
317 * @len_on_bd: total length of the first packet for the aggregation
320 * Approximate value of the MSS for this aggregation calculated using
321 * the first packet of it.
323 static inline u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags,
326 /* TPA aggregation won't have IP options or TCP options
327 * other than the timestamp.
329 u16 hdrs_len = ETH_HLEN + sizeof(struct iphdr) + sizeof(struct tcphdr);
332 /* Check if there was a TCP timestamp; if there was, it will
333 * always be 12 bytes long: nop nop kind length echo val.
335 * Otherwise FW would close the aggregation.
337 if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
338 hdrs_len += TPA_TSTAMP_OPT_LEN;
340 return len_on_bd - hdrs_len;
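/* Worked example for the MSS estimate above (assuming a plain IPv4/TCP
 * frame): hdrs_len = ETH_HLEN (14) + sizeof(struct iphdr) (20) +
 * sizeof(struct tcphdr) (20) = 54 bytes, or 66 bytes when the 12-byte
 * timestamp option is present, so an aggregation whose first packet has
 * len_on_bd == 1514 yields an approximate MSS of 1460 (or 1448).
 */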
343 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
345 struct eth_fast_path_rx_cqe *fp_cqe,
346 u16 cqe_idx, u16 parsing_flags)
348 struct sw_rx_page *rx_pg, old_rx_pg;
349 u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
350 u32 i, frag_len, frag_size, pages;
354 frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
355 pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
357 /* This is needed in order to enable forwarding support */
359 skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp, parsing_flags,
362 #ifdef BNX2X_STOP_ON_ERROR
363 if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
364 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
366 BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
367 fp_cqe->pkt_len, len_on_bd);
373 /* Run through the SGL and compose the fragmented skb */
374 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
376 RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[j]));
378 /* FW gives the indices of the SGE as if the ring is an array
379 (meaning that "next" element will consume 2 indices) */
380 frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
381 rx_pg = &fp->rx_page_ring[sge_idx];
384 /* If we fail to allocate a substitute page, we simply stop
385 where we are and drop the whole packet */
386 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
388 fp->eth_q_stats.rx_skb_alloc_failed++;
392 /* Unmap the page as we are going to pass it to the stack */
393 dma_unmap_page(&bp->pdev->dev,
394 dma_unmap_addr(&old_rx_pg, mapping),
395 SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
397 /* Add one frag and update the appropriate fields in the skb */
398 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
400 skb->data_len += frag_len;
401 skb->truesize += frag_len;
402 skb->len += frag_len;
404 frag_size -= frag_len;
410 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
411 u16 queue, int pad, int len, union eth_rx_cqe *cqe,
414 struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
415 struct sk_buff *skb = rx_buf->skb;
417 struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, fp->rx_buf_size);
419 /* Unmap skb in the pool anyway, as we are going to change
420 pool entry status to BNX2X_TPA_STOP even if new skb allocation fails. */
422 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
423 fp->rx_buf_size, DMA_FROM_DEVICE);
425 if (likely(new_skb)) {
426 /* fix ip xsum and give it to the stack */
427 /* (no need to map the new skb) */
429 le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags);
432 prefetch(((char *)(skb)) + L1_CACHE_BYTES);
434 #ifdef BNX2X_STOP_ON_ERROR
435 if (pad + len > fp->rx_buf_size) {
436 BNX2X_ERR("skb_put is about to fail... "
437 "pad %d len %d rx_buf_size %d\n",
438 pad, len, fp->rx_buf_size);
444 skb_reserve(skb, pad);
447 skb->protocol = eth_type_trans(skb, bp->dev);
448 skb->ip_summed = CHECKSUM_UNNECESSARY;
453 iph = (struct iphdr *)skb->data;
455 iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
458 if (!bnx2x_fill_frag_skb(bp, fp, skb,
459 &cqe->fast_path_cqe, cqe_idx,
461 if (parsing_flags & PARSING_FLAGS_VLAN)
462 __vlan_hwaccel_put_tag(skb,
463 le16_to_cpu(cqe->fast_path_cqe.
465 napi_gro_receive(&fp->napi, skb);
467 DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
468 " - dropping packet!\n");
469 dev_kfree_skb_any(skb);
473 /* put new skb in bin */
474 fp->tpa_pool[queue].skb = new_skb;
477 /* else drop the packet and keep the buffer in the bin */
478 DP(NETIF_MSG_RX_STATUS,
479 "Failed to allocate new skb - dropping packet!\n");
480 fp->eth_q_stats.rx_skb_alloc_failed++;
483 fp->tpa_state[queue] = BNX2X_TPA_STOP;
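/* TPA queue life cycle as implemented above: bnx2x_tpa_start() parks the
 * partially received skb in tpa_pool[queue], moves a spare skb from the
 * pool into the producer BD and marks the bin BNX2X_TPA_START;
 * bnx2x_tpa_stop() fixes the IP checksum, attaches the SGE pages as frags
 * via bnx2x_fill_frag_skb(), hands the skb to napi_gro_receive() and
 * returns the bin to BNX2X_TPA_STOP, refilling the pool with a newly
 * allocated skb (or dropping the packet if allocation fails).
 */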
486 /* Set Toeplitz hash value in the skb using the value from the
487 * CQE (calculated by HW).
489 static inline void bnx2x_set_skb_rxhash(struct bnx2x *bp, union eth_rx_cqe *cqe,
492 /* Set Toeplitz hash from CQE */
493 if ((bp->dev->features & NETIF_F_RXHASH) &&
494 (cqe->fast_path_cqe.status_flags &
495 ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
497 le32_to_cpu(cqe->fast_path_cqe.rss_hash_result);
500 int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
502 struct bnx2x *bp = fp->bp;
503 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
504 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
507 #ifdef BNX2X_STOP_ON_ERROR
508 if (unlikely(bp->panic))
512 /* CQ "next element" is of the size of the regular element,
513 that's why it's ok here */
514 hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
515 if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
518 bd_cons = fp->rx_bd_cons;
519 bd_prod = fp->rx_bd_prod;
520 bd_prod_fw = bd_prod;
521 sw_comp_cons = fp->rx_comp_cons;
522 sw_comp_prod = fp->rx_comp_prod;
524 /* Memory barrier necessary as speculative reads of the rx
525 * buffer can be ahead of the index in the status block
529 DP(NETIF_MSG_RX_STATUS,
530 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
531 fp->index, hw_comp_cons, sw_comp_cons);
533 while (sw_comp_cons != hw_comp_cons) {
534 struct sw_rx_bd *rx_buf = NULL;
536 union eth_rx_cqe *cqe;
540 comp_ring_cons = RCQ_BD(sw_comp_cons);
541 bd_prod = RX_BD(bd_prod);
542 bd_cons = RX_BD(bd_cons);
544 /* Prefetch the page containing the BD descriptor
545 at producer's index. It will be needed when a new skb is allocated */
547 prefetch((void *)(PAGE_ALIGN((unsigned long)
548 (&fp->rx_desc_ring[bd_prod])) -
551 cqe = &fp->rx_comp_ring[comp_ring_cons];
552 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
554 DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
555 " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
556 cqe_fp_flags, cqe->fast_path_cqe.status_flags,
557 le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
558 le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
559 le16_to_cpu(cqe->fast_path_cqe.pkt_len));
561 /* is this a slowpath msg? */
562 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
563 bnx2x_sp_event(fp, cqe);
566 /* this is an rx packet */
568 rx_buf = &fp->rx_buf_ring[bd_cons];
571 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
572 pad = cqe->fast_path_cqe.placement_offset;
574 /* - If CQE is marked both TPA_START and TPA_END it is a non-TPA CQE.
576 * - FP CQE will always have the TPA_START and/or
577 * TPA_STOP flags set.
579 if ((!fp->disable_tpa) &&
580 (TPA_TYPE(cqe_fp_flags) !=
581 (TPA_TYPE_START | TPA_TYPE_END))) {
582 u16 queue = cqe->fast_path_cqe.queue_index;
584 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
585 DP(NETIF_MSG_RX_STATUS,
586 "calling tpa_start on queue %d\n",
589 bnx2x_tpa_start(fp, queue, skb,
592 /* Set Toeplitz hash for an LRO skb */
593 bnx2x_set_skb_rxhash(bp, cqe, skb);
596 } else { /* TPA_STOP */
597 DP(NETIF_MSG_RX_STATUS,
598 "calling tpa_stop on queue %d\n",
601 if (!BNX2X_RX_SUM_FIX(cqe))
602 BNX2X_ERR("STOP on none TCP "
605 /* This is the size of the linear data on this skb */
607 len = le16_to_cpu(cqe->fast_path_cqe.
609 bnx2x_tpa_stop(bp, fp, queue, pad,
610 len, cqe, comp_ring_cons);
611 #ifdef BNX2X_STOP_ON_ERROR
616 bnx2x_update_sge_prod(fp,
617 &cqe->fast_path_cqe);
622 dma_sync_single_for_device(&bp->pdev->dev,
623 dma_unmap_addr(rx_buf, mapping),
624 pad + RX_COPY_THRESH,
626 prefetch(((char *)(skb)) + L1_CACHE_BYTES);
628 /* is this an error packet? */
629 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
631 "ERROR flags %x rx packet %u\n",
632 cqe_fp_flags, sw_comp_cons);
633 fp->eth_q_stats.rx_err_discard_pkt++;
637 /* Since we don't have a jumbo ring
638 * copy small packets if mtu > 1500
640 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
641 (len <= RX_COPY_THRESH)) {
642 struct sk_buff *new_skb;
644 new_skb = netdev_alloc_skb(bp->dev,
646 if (new_skb == NULL) {
648 "ERROR packet dropped "
649 "because of alloc failure\n");
650 fp->eth_q_stats.rx_skb_alloc_failed++;
655 skb_copy_from_linear_data_offset(skb, pad,
656 new_skb->data + pad, len);
657 skb_reserve(new_skb, pad);
658 skb_put(new_skb, len);
660 bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod);
665 if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
666 dma_unmap_single(&bp->pdev->dev,
667 dma_unmap_addr(rx_buf, mapping),
670 skb_reserve(skb, pad);
675 "ERROR packet dropped because "
676 "of alloc failure\n");
677 fp->eth_q_stats.rx_skb_alloc_failed++;
679 bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod);
683 skb->protocol = eth_type_trans(skb, bp->dev);
685 /* Set Toeplitz hash for a non-LRO skb */
686 bnx2x_set_skb_rxhash(bp, cqe, skb);
688 skb_checksum_none_assert(skb);
690 if (bp->dev->features & NETIF_F_RXCSUM) {
691 if (likely(BNX2X_RX_CSUM_OK(cqe)))
692 skb->ip_summed = CHECKSUM_UNNECESSARY;
694 fp->eth_q_stats.hw_csum_err++;
698 skb_record_rx_queue(skb, fp->index);
700 if (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
702 __vlan_hwaccel_put_tag(skb,
703 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
704 napi_gro_receive(&fp->napi, skb);
710 bd_cons = NEXT_RX_IDX(bd_cons);
711 bd_prod = NEXT_RX_IDX(bd_prod);
712 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
715 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
716 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
718 if (rx_pkt == budget)
722 fp->rx_bd_cons = bd_cons;
723 fp->rx_bd_prod = bd_prod_fw;
724 fp->rx_comp_cons = sw_comp_cons;
725 fp->rx_comp_prod = sw_comp_prod;
727 /* Update producers */
728 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
731 fp->rx_pkt += rx_pkt;
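/* Index bookkeeping in bnx2x_rx_int() above: hw_comp_cons is read from
 * the status block, sw_comp_cons/sw_comp_prod walk the completion (CQE)
 * ring and bd_cons/bd_prod(_fw) walk the RX BD ring; once the budget is
 * exhausted or the rings are drained, the updated producers are pushed
 * to the chip via bnx2x_update_rx_prod().
 */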
737 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
739 struct bnx2x_fastpath *fp = fp_cookie;
740 struct bnx2x *bp = fp->bp;
742 DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB "
743 "[fp %d fw_sd %d igusb %d]\n",
744 fp->index, fp->fw_sb_id, fp->igu_sb_id);
745 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
747 #ifdef BNX2X_STOP_ON_ERROR
748 if (unlikely(bp->panic))
752 /* Handle Rx and Tx according to MSI-X vector */
753 prefetch(fp->rx_cons_sb);
754 prefetch(fp->tx_cons_sb);
755 prefetch(&fp->sb_running_index[SM_RX_ID]);
756 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
761 /* HW Lock for shared dual port PHYs */
762 void bnx2x_acquire_phy_lock(struct bnx2x *bp)
764 mutex_lock(&bp->port.phy_mutex);
766 if (bp->port.need_hw_lock)
767 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
770 void bnx2x_release_phy_lock(struct bnx2x *bp)
772 if (bp->port.need_hw_lock)
773 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
775 mutex_unlock(&bp->port.phy_mutex);
778 /* calculates MF speed according to current linespeed and MF configuration */
779 u16 bnx2x_get_mf_speed(struct bnx2x *bp)
781 u16 line_speed = bp->link_vars.line_speed;
783 u16 maxCfg = bnx2x_extract_max_cfg(bp,
784 bp->mf_config[BP_VN(bp)]);
786 /* Calculate the current MAX line speed limit for the MF devices */
790 line_speed = (line_speed * maxCfg) / 100;
792 u16 vn_max_rate = maxCfg * 100;
794 if (vn_max_rate < line_speed)
795 line_speed = vn_max_rate;
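/* Illustrative numbers for the two branches above (hypothetical values):
 * when maxCfg is treated as a percentage, a 10000 Mbps link with
 * maxCfg = 30 reports 3000 Mbps; when it is treated as a rate in
 * 100 Mbps units, maxCfg = 30 caps the reported speed at 3000 Mbps
 * regardless of the negotiated line speed.
 */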
803 * bnx2x_fill_report_data - fill link report data to report
806 * @data: link state to update
808 * It uses non-atomic bit operations because it is called under the mutex.
810 static inline void bnx2x_fill_report_data(struct bnx2x *bp,
811 struct bnx2x_link_report_data *data)
813 u16 line_speed = bnx2x_get_mf_speed(bp);
815 memset(data, 0, sizeof(*data));
817 /* Fill the report data: effective line speed */
818 data->line_speed = line_speed;
821 if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
822 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
823 &data->link_report_flags);
826 if (bp->link_vars.duplex == DUPLEX_FULL)
827 __set_bit(BNX2X_LINK_REPORT_FD, &data->link_report_flags);
829 /* Rx Flow Control is ON */
830 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
831 __set_bit(BNX2X_LINK_REPORT_RX_FC_ON, &data->link_report_flags);
833 /* Tx Flow Control is ON */
834 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
835 __set_bit(BNX2X_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
839 * bnx2x_link_report - report link status to OS.
843 * Calls the __bnx2x_link_report() under the same locking scheme
844 * as the link/PHY state managing code to ensure consistent link reporting.
848 void bnx2x_link_report(struct bnx2x *bp)
850 bnx2x_acquire_phy_lock(bp);
851 __bnx2x_link_report(bp);
852 bnx2x_release_phy_lock(bp);
856 * __bnx2x_link_report - report link status to OS.
860 * Non-atomic implementation.
861 * Should be called under the phy_lock.
863 void __bnx2x_link_report(struct bnx2x *bp)
865 struct bnx2x_link_report_data cur_data;
869 bnx2x_read_mf_cfg(bp);
871 /* Read the current link report info */
872 bnx2x_fill_report_data(bp, &cur_data);
874 /* Don't report link down or exactly the same link status twice */
875 if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
876 (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
877 &bp->last_reported_link.link_report_flags) &&
878 test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
879 &cur_data.link_report_flags)))
884 /* We are going to report new link parameters now -
885 * remember the current data for the next time.
887 memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));
889 if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
890 &cur_data.link_report_flags)) {
891 netif_carrier_off(bp->dev);
892 netdev_err(bp->dev, "NIC Link is Down\n");
895 netif_carrier_on(bp->dev);
896 netdev_info(bp->dev, "NIC Link is Up, ");
897 pr_cont("%d Mbps ", cur_data.line_speed);
899 if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
900 &cur_data.link_report_flags))
901 pr_cont("full duplex");
903 pr_cont("half duplex");
905 /* Handle the FC at the end so that only these flags could possibly
906 * be set. This way we may easily check if there is no FC enabled.
909 if (cur_data.link_report_flags) {
910 if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
911 &cur_data.link_report_flags)) {
912 pr_cont(", receive ");
913 if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
914 &cur_data.link_report_flags))
915 pr_cont("& transmit ");
917 pr_cont(", transmit ");
919 pr_cont("flow control ON");
925 void bnx2x_init_rx_rings(struct bnx2x *bp)
927 int func = BP_FUNC(bp);
928 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
929 ETH_MAX_AGGREGATION_QUEUES_E1H;
933 /* Allocate TPA resources */
934 for_each_rx_queue(bp, j) {
935 struct bnx2x_fastpath *fp = &bp->fp[j];
938 "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
940 if (!fp->disable_tpa) {
941 /* Fill the per-aggregation pool */
942 for (i = 0; i < max_agg_queues; i++) {
943 fp->tpa_pool[i].skb =
944 netdev_alloc_skb(bp->dev, fp->rx_buf_size);
945 if (!fp->tpa_pool[i].skb) {
946 BNX2X_ERR("Failed to allocate TPA "
947 "skb pool for queue[%d] - "
948 "disabling TPA on this "
950 bnx2x_free_tpa_pool(bp, fp, i);
954 dma_unmap_addr_set((struct sw_rx_bd *)
955 &bp->fp->tpa_pool[i],
957 fp->tpa_state[i] = BNX2X_TPA_STOP;
960 /* "next page" elements initialization */
961 bnx2x_set_next_page_sgl(fp);
963 /* set SGEs bit mask */
964 bnx2x_init_sge_ring_bit_mask(fp);
966 /* Allocate SGEs and initialize the ring elements */
967 for (i = 0, ring_prod = 0;
968 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
970 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
971 BNX2X_ERR("was only able to allocate "
973 BNX2X_ERR("disabling TPA for"
975 /* Cleanup already allocated elements */
976 bnx2x_free_rx_sge_range(bp,
978 bnx2x_free_tpa_pool(bp,
984 ring_prod = NEXT_SGE_IDX(ring_prod);
987 fp->rx_sge_prod = ring_prod;
991 for_each_rx_queue(bp, j) {
992 struct bnx2x_fastpath *fp = &bp->fp[j];
996 /* Activate BD ring */
998 * this will generate an interrupt (to the TSTORM)
999 * must only be done after chip is initialized
1001 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1007 if (!CHIP_IS_E2(bp)) {
1008 REG_WR(bp, BAR_USTRORM_INTMEM +
1009 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
1010 U64_LO(fp->rx_comp_mapping));
1011 REG_WR(bp, BAR_USTRORM_INTMEM +
1012 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
1013 U64_HI(fp->rx_comp_mapping));
1018 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
1022 for_each_tx_queue(bp, i) {
1023 struct bnx2x_fastpath *fp = &bp->fp[i];
1025 u16 bd_cons = fp->tx_bd_cons;
1026 u16 sw_prod = fp->tx_pkt_prod;
1027 u16 sw_cons = fp->tx_pkt_cons;
1029 while (sw_cons != sw_prod) {
1030 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
1036 static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
1038 struct bnx2x *bp = fp->bp;
1041 /* ring wasn't allocated */
1042 if (fp->rx_buf_ring == NULL)
1045 for (i = 0; i < NUM_RX_BD; i++) {
1046 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
1047 struct sk_buff *skb = rx_buf->skb;
1052 dma_unmap_single(&bp->pdev->dev,
1053 dma_unmap_addr(rx_buf, mapping),
1054 fp->rx_buf_size, DMA_FROM_DEVICE);
1061 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1065 for_each_rx_queue(bp, j) {
1066 struct bnx2x_fastpath *fp = &bp->fp[j];
1068 bnx2x_free_rx_bds(fp);
1070 if (!fp->disable_tpa)
1071 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
1072 ETH_MAX_AGGREGATION_QUEUES_E1 :
1073 ETH_MAX_AGGREGATION_QUEUES_E1H);
1077 void bnx2x_free_skbs(struct bnx2x *bp)
1079 bnx2x_free_tx_skbs(bp);
1080 bnx2x_free_rx_skbs(bp);
1083 void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
1085 /* load old values */
1086 u32 mf_cfg = bp->mf_config[BP_VN(bp)];
1088 if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
1089 /* leave all but MAX value */
1090 mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
1092 /* set new MAX value */
1093 mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
1094 & FUNC_MF_CFG_MAX_BW_MASK;
1096 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
1101 * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
1103 * @bp: driver handle
1104 * @nvecs: number of vectors to be released
1106 static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
1110 if (nvecs == offset)
1112 free_irq(bp->msix_table[offset].vector, bp->dev);
1113 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
1114 bp->msix_table[offset].vector);
1117 if (nvecs == offset)
1122 for_each_eth_queue(bp, i) {
1123 if (nvecs == offset)
1125 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d "
1126 "irq\n", i, bp->msix_table[offset].vector);
1128 free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
1132 void bnx2x_free_irq(struct bnx2x *bp)
1134 if (bp->flags & USING_MSIX_FLAG)
1135 bnx2x_free_msix_irqs(bp, BNX2X_NUM_ETH_QUEUES(bp) +
1136 CNIC_CONTEXT_USE + 1);
1137 else if (bp->flags & USING_MSI_FLAG)
1138 free_irq(bp->pdev->irq, bp->dev);
1140 free_irq(bp->pdev->irq, bp->dev);
1143 int bnx2x_enable_msix(struct bnx2x *bp)
1145 int msix_vec = 0, i, rc, req_cnt;
1147 bp->msix_table[msix_vec].entry = msix_vec;
1148 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n",
1149 bp->msix_table[0].entry);
1153 bp->msix_table[msix_vec].entry = msix_vec;
1154 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d (CNIC)\n",
1155 bp->msix_table[msix_vec].entry, bp->msix_table[msix_vec].entry);
1158 for_each_eth_queue(bp, i) {
1159 bp->msix_table[msix_vec].entry = msix_vec;
1160 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
1161 "(fastpath #%u)\n", msix_vec, msix_vec, i);
1165 req_cnt = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_CONTEXT_USE + 1;
1167 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], req_cnt);
1170 * reconfigure number of tx/rx queues according to the available MSI-X vectors
1173 if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
1174 /* how many fewer vectors will we have? */
1175 int diff = req_cnt - rc;
1178 "Trying to use less MSI-X vectors: %d\n", rc);
1180 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
1184 "MSI-X is not attainable rc %d\n", rc);
1188 * decrease number of queues by number of unallocated entries
1190 bp->num_queues -= diff;
1192 DP(NETIF_MSG_IFUP, "New queue configuration set: %d\n",
1195 /* fall back to INTx if not enough memory */
1197 bp->flags |= DISABLE_MSI_FLAG;
1198 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
1202 bp->flags |= USING_MSIX_FLAG;
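/* MSI-X vector layout requested above: entry 0 is the slow path, an
 * optional entry follows for CNIC, and the remaining entries map one
 * vector per ethernet fastpath queue; when pci_enable_msix() grants
 * fewer vectors than requested, the driver retries with the granted
 * count and shrinks bp->num_queues accordingly.
 */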
1207 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1209 int i, rc, offset = 0;
1211 rc = request_irq(bp->msix_table[offset++].vector,
1212 bnx2x_msix_sp_int, 0,
1213 bp->dev->name, bp->dev);
1215 BNX2X_ERR("request sp irq failed\n");
1222 for_each_eth_queue(bp, i) {
1223 struct bnx2x_fastpath *fp = &bp->fp[i];
1224 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1227 rc = request_irq(bp->msix_table[offset].vector,
1228 bnx2x_msix_fp_int, 0, fp->name, fp);
1230 BNX2X_ERR("request fp #%d irq (%d) failed rc %d\n", i,
1231 bp->msix_table[offset].vector, rc);
1232 bnx2x_free_msix_irqs(bp, offset);
1237 fp->state = BNX2X_FP_STATE_IRQ;
1240 i = BNX2X_NUM_ETH_QUEUES(bp);
1241 offset = 1 + CNIC_CONTEXT_USE;
1242 netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d"
1244 bp->msix_table[0].vector,
1245 0, bp->msix_table[offset].vector,
1246 i - 1, bp->msix_table[offset + i - 1].vector);
1251 int bnx2x_enable_msi(struct bnx2x *bp)
1255 rc = pci_enable_msi(bp->pdev);
1257 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
1260 bp->flags |= USING_MSI_FLAG;
1265 static int bnx2x_req_irq(struct bnx2x *bp)
1267 unsigned long flags;
1270 if (bp->flags & USING_MSI_FLAG)
1273 flags = IRQF_SHARED;
1275 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
1276 bp->dev->name, bp->dev);
1278 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
1283 static void bnx2x_napi_enable(struct bnx2x *bp)
1287 for_each_napi_queue(bp, i)
1288 napi_enable(&bnx2x_fp(bp, i, napi));
1291 static void bnx2x_napi_disable(struct bnx2x *bp)
1295 for_each_napi_queue(bp, i)
1296 napi_disable(&bnx2x_fp(bp, i, napi));
1299 void bnx2x_netif_start(struct bnx2x *bp)
1301 if (netif_running(bp->dev)) {
1302 bnx2x_napi_enable(bp);
1303 bnx2x_int_enable(bp);
1304 if (bp->state == BNX2X_STATE_OPEN)
1305 netif_tx_wake_all_queues(bp->dev);
1309 void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1311 bnx2x_int_disable_sync(bp, disable_hw);
1312 bnx2x_napi_disable(bp);
1313 netif_tx_disable(bp->dev);
1316 u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
1319 struct bnx2x *bp = netdev_priv(dev);
1321 return skb_tx_hash(dev, skb);
1323 struct ethhdr *hdr = (struct ethhdr *)skb->data;
1324 u16 ether_type = ntohs(hdr->h_proto);
1326 /* Skip VLAN tag if present */
1327 if (ether_type == ETH_P_8021Q) {
1328 struct vlan_ethhdr *vhdr =
1329 (struct vlan_ethhdr *)skb->data;
1331 ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1334 /* If ethertype is FCoE or FIP - use FCoE ring */
1335 if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
1336 return bnx2x_fcoe(bp, index);
1339 /* Select a non-FCoE queue: if FCoE is enabled, exclude the FCoE L2 ring
1341 return __skb_tx_hash(dev, skb,
1342 dev->real_num_tx_queues - FCOE_CONTEXT_USE);
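/* Queue selection policy above: FCoE and FIP frames are steered to the
 * dedicated FCoE L2 ring, everything else is hashed across the real TX
 * queues with the FCoE ring excluded from the hash range.
 */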
1345 void bnx2x_set_num_queues(struct bnx2x *bp)
1347 switch (bp->multi_mode) {
1348 case ETH_RSS_MODE_DISABLED:
1351 case ETH_RSS_MODE_REGULAR:
1352 bp->num_queues = bnx2x_calc_num_queues(bp);
1360 /* Add special queues */
1361 bp->num_queues += NONE_ETH_CONTEXT_USE;
1365 static inline void bnx2x_set_fcoe_eth_macs(struct bnx2x *bp)
1369 bnx2x_set_fip_eth_mac_addr(bp, 1);
1370 bnx2x_set_all_enode_macs(bp, 1);
1371 bp->flags |= FCOE_MACS_SET;
1376 static void bnx2x_release_firmware(struct bnx2x *bp)
1378 kfree(bp->init_ops_offsets);
1379 kfree(bp->init_ops);
1380 kfree(bp->init_data);
1381 release_firmware(bp->firmware);
1384 static inline int bnx2x_set_real_num_queues(struct bnx2x *bp)
1386 int rc, num = bp->num_queues;
1390 num -= FCOE_CONTEXT_USE;
1393 netif_set_real_num_tx_queues(bp->dev, num);
1394 rc = netif_set_real_num_rx_queues(bp->dev, num);
1398 static inline void bnx2x_set_rx_buf_size(struct bnx2x *bp)
1402 for_each_queue(bp, i) {
1403 struct bnx2x_fastpath *fp = &bp->fp[i];
1405 /* Always use a mini-jumbo MTU for the FCoE L2 ring */
1408 * Although there are no IP frames expected to arrive on
1409 * this ring we still want to add an
1410 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer overrun on it.
1414 BNX2X_FCOE_MINI_JUMBO_MTU + ETH_OVREHEAD +
1415 BNX2X_RX_ALIGN + IP_HEADER_ALIGNMENT_PADDING;
1418 bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN +
1419 IP_HEADER_ALIGNMENT_PADDING;
1423 /* must be called with rtnl_lock */
1424 int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
1429 /* Set init arrays */
1430 rc = bnx2x_init_firmware(bp);
1432 BNX2X_ERR("Error loading firmware\n");
1436 #ifdef BNX2X_STOP_ON_ERROR
1437 if (unlikely(bp->panic))
1441 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
1443 /* Set the initial link reported state to link down */
1444 bnx2x_acquire_phy_lock(bp);
1445 memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
1446 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1447 &bp->last_reported_link.link_report_flags);
1448 bnx2x_release_phy_lock(bp);
1450 /* must be called before memory allocation and HW init */
1451 bnx2x_ilt_set_info(bp);
1453 /* zero fastpath structures preserving invariants like napi which are
1454 * allocated only once
1456 for_each_queue(bp, i)
1459 /* Set the receive queues buffer size */
1460 bnx2x_set_rx_buf_size(bp);
1462 for_each_queue(bp, i)
1463 bnx2x_fp(bp, i, disable_tpa) =
1464 ((bp->flags & TPA_ENABLE_FLAG) == 0);
1467 /* We don't want TPA on FCoE L2 ring */
1468 bnx2x_fcoe(bp, disable_tpa) = 1;
1471 if (bnx2x_alloc_mem(bp))
1474 /* As long as bnx2x_alloc_mem() may possibly update
1475 * bp->num_queues, bnx2x_set_real_num_queues() should always be called after it.
1478 rc = bnx2x_set_real_num_queues(bp);
1480 BNX2X_ERR("Unable to set real_num_queues\n");
1484 bnx2x_napi_enable(bp);
1486 /* Send LOAD_REQUEST command to MCP
1487 Returns the type of LOAD command:
1488 if it is the first port to be initialized
1489 common blocks should be initialized, otherwise - not
1491 if (!BP_NOMCP(bp)) {
1492 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0);
1494 BNX2X_ERR("MCP response failure, aborting\n");
1498 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
1499 rc = -EBUSY; /* other port in diagnostic mode */
1504 int path = BP_PATH(bp);
1505 int port = BP_PORT(bp);
1507 DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
1508 path, load_count[path][0], load_count[path][1],
1509 load_count[path][2]);
1510 load_count[path][0]++;
1511 load_count[path][1 + port]++;
1512 DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
1513 path, load_count[path][0], load_count[path][1],
1514 load_count[path][2]);
1515 if (load_count[path][0] == 1)
1516 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
1517 else if (load_count[path][1 + port] == 1)
1518 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
1520 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
1523 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
1524 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
1525 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
1529 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
1532 rc = bnx2x_init_hw(bp, load_code);
1534 BNX2X_ERR("HW init failed, aborting\n");
1535 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
1539 /* Connect to IRQs */
1540 rc = bnx2x_setup_irqs(bp);
1542 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
1546 /* Setup NIC internals and enable interrupts */
1547 bnx2x_nic_init(bp, load_code);
1549 if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
1550 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
1551 (bp->common.shmem2_base))
1552 SHMEM2_WR(bp, dcc_support,
1553 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
1554 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
1556 /* Send LOAD_DONE command to MCP */
1557 if (!BP_NOMCP(bp)) {
1558 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
1560 BNX2X_ERR("MCP response failure, aborting\n");
1566 bnx2x_dcbx_init(bp);
1568 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
1570 rc = bnx2x_func_start(bp);
1572 BNX2X_ERR("Function start failed!\n");
1573 #ifndef BNX2X_STOP_ON_ERROR
1581 rc = bnx2x_setup_client(bp, &bp->fp[0], 1 /* Leading */);
1583 BNX2X_ERR("Setup leading failed!\n");
1584 #ifndef BNX2X_STOP_ON_ERROR
1592 if (!CHIP_IS_E1(bp) &&
1593 (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED)) {
1594 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
1595 bp->flags |= MF_FUNC_DIS;
1599 /* Enable Timer scan */
1600 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
1603 for_each_nondefault_queue(bp, i) {
1604 rc = bnx2x_setup_client(bp, &bp->fp[i], 0);
1613 /* Now when Clients are configured we are ready to work */
1614 bp->state = BNX2X_STATE_OPEN;
1617 bnx2x_set_fcoe_eth_macs(bp);
1620 bnx2x_set_eth_mac(bp, 1);
1622 /* Clear MC configuration */
1624 bnx2x_invalidate_e1_mc_list(bp);
1626 bnx2x_invalidate_e1h_mc_list(bp);
1628 /* Clear UC lists configuration */
1629 bnx2x_invalidate_uc_list(bp);
1631 if (bp->pending_max) {
1632 bnx2x_update_max_mf_config(bp, bp->pending_max);
1633 bp->pending_max = 0;
1637 bnx2x_initial_phy_init(bp, load_mode);
1639 /* Initialize Rx filtering */
1640 bnx2x_set_rx_mode(bp->dev);
1642 /* Start fast path */
1643 switch (load_mode) {
1645 /* Tx queue should only be re-enabled */
1646 netif_tx_wake_all_queues(bp->dev);
1647 /* Initialize the receive filter. */
1651 netif_tx_start_all_queues(bp->dev);
1652 smp_mb__after_clear_bit();
1656 bp->state = BNX2X_STATE_DIAG;
1664 bnx2x__link_status_update(bp);
1666 /* start the timer */
1667 mod_timer(&bp->timer, jiffies + bp->current_interval);
1670 bnx2x_setup_cnic_irq_info(bp);
1671 if (bp->state == BNX2X_STATE_OPEN)
1672 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
1674 bnx2x_inc_load_cnt(bp);
1676 bnx2x_release_firmware(bp);
1682 /* Disable Timer scan */
1683 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0);
1686 bnx2x_int_disable_sync(bp, 1);
1688 /* Free SKBs, SGEs, TPA pool and driver internals */
1689 bnx2x_free_skbs(bp);
1690 for_each_rx_queue(bp, i)
1691 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
1696 if (!BP_NOMCP(bp)) {
1697 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
1698 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
1703 bnx2x_napi_disable(bp);
1707 bnx2x_release_firmware(bp);
1712 /* must be called with rtnl_lock */
1713 int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
1717 if (bp->state == BNX2X_STATE_CLOSED) {
1718 /* Interface has been removed - nothing to recover */
1719 bp->recovery_state = BNX2X_RECOVERY_DONE;
1721 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
1728 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
1730 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
1732 /* Set "drop all" */
1733 bp->rx_mode = BNX2X_RX_MODE_NONE;
1734 bnx2x_set_storm_rx_mode(bp);
1737 bnx2x_tx_disable(bp);
1739 del_timer_sync(&bp->timer);
1741 SHMEM_WR(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb,
1742 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
1744 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
1746 /* Cleanup the chip if needed */
1747 if (unload_mode != UNLOAD_RECOVERY)
1748 bnx2x_chip_cleanup(bp, unload_mode);
1750 /* Disable HW interrupts, NAPI and Tx */
1751 bnx2x_netif_stop(bp, 1);
1759 /* Free SKBs, SGEs, TPA pool and driver internals */
1760 bnx2x_free_skbs(bp);
1761 for_each_rx_queue(bp, i)
1762 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
1766 bp->state = BNX2X_STATE_CLOSED;
1768 /* The last driver must disable a "close the gate" if there is no
1769 * parity attention or "process kill" pending.
1771 if ((!bnx2x_dec_load_cnt(bp)) && (!bnx2x_chk_parity_attn(bp)) &&
1772 bnx2x_reset_is_done(bp))
1773 bnx2x_disable_close_the_gate(bp);
1775 /* Reset MCP mailbox sequence if there is ongoing recovery */
1776 if (unload_mode == UNLOAD_RECOVERY)
1782 int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
1786 /* If there is no power capability, silently succeed */
1788 DP(NETIF_MSG_HW, "No power capability. Breaking.\n");
1792 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
1796 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
1797 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
1798 PCI_PM_CTRL_PME_STATUS));
1800 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
1801 /* delay required during transition out of D3hot */
1806 /* If there are other clients above don't
1807 shut down the power */
1808 if (atomic_read(&bp->pdev->enable_cnt) != 1)
1810 /* Don't shut down the power for emulation and FPGA */
1811 if (CHIP_REV_IS_SLOW(bp))
1814 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
1818 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
1820 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
1823 /* No more memory access after this point until
1824 * device is brought back to D0.
1835 * net_device service functions
1837 int bnx2x_poll(struct napi_struct *napi, int budget)
1840 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
1842 struct bnx2x *bp = fp->bp;
1845 #ifdef BNX2X_STOP_ON_ERROR
1846 if (unlikely(bp->panic)) {
1847 napi_complete(napi);
1852 if (bnx2x_has_tx_work(fp))
1855 if (bnx2x_has_rx_work(fp)) {
1856 work_done += bnx2x_rx_int(fp, budget - work_done);
1858 /* must not complete if we consumed full budget */
1859 if (work_done >= budget)
1863 /* Fall out from the NAPI loop if needed */
1864 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
1866 /* No need to update SB for FCoE L2 ring as long as
1867 * it's connected to the default SB and the SB
1868 * has been updated when NAPI was scheduled.
1870 if (IS_FCOE_FP(fp)) {
1871 napi_complete(napi);
1876 bnx2x_update_fpsb_idx(fp);
1877 /* bnx2x_has_rx_work() reads the status block,
1878 * thus we need to ensure that status block indices
1879 * have been actually read (bnx2x_update_fpsb_idx)
1880 * prior to this check (bnx2x_has_rx_work) so that
1881 * we won't write the "newer" value of the status block
1882 * to IGU (if there was a DMA right after
1883 * bnx2x_has_rx_work and if there is no rmb, the memory
1884 * reading (bnx2x_update_fpsb_idx) may be postponed
1885 * to right before bnx2x_ack_sb). In this case there
1886 * will never be another interrupt until there is
1887 * another update of the status block, while there
1888 * is still unhandled work.
1892 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
1893 napi_complete(napi);
1894 /* Re-enable interrupts */
1896 "Update index to %d\n", fp->fp_hc_idx);
1897 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
1898 le16_to_cpu(fp->fp_hc_idx),
1908 /* we split the first BD into headers and data BDs
1909 * to ease the pain of our fellow microcode engineers
1910 * we use one mapping for both BDs
1911 * So far this has only been observed to happen
1912 * in Other Operating Systems(TM)
1914 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
1915 struct bnx2x_fastpath *fp,
1916 struct sw_tx_bd *tx_buf,
1917 struct eth_tx_start_bd **tx_bd, u16 hlen,
1918 u16 bd_prod, int nbd)
1920 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
1921 struct eth_tx_bd *d_tx_bd;
1923 int old_len = le16_to_cpu(h_tx_bd->nbytes);
1925 /* first fix first BD */
1926 h_tx_bd->nbd = cpu_to_le16(nbd);
1927 h_tx_bd->nbytes = cpu_to_le16(hlen);
1929 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
1930 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
1931 h_tx_bd->addr_lo, h_tx_bd->nbd);
1933 /* now get a new data BD
1934 * (after the pbd) and fill it */
1935 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
1936 d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
1938 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
1939 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
1941 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1942 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1943 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
1945 /* this marks the BD as one that has no individual mapping */
1946 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
1948 DP(NETIF_MSG_TX_QUEUED,
1949 "TSO split data size is %d (%x:%x)\n",
1950 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
1953 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
1958 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
1961 csum = (u16) ~csum_fold(csum_sub(csum,
1962 csum_partial(t_header - fix, fix, 0)));
1965 csum = (u16) ~csum_fold(csum_add(csum,
1966 csum_partial(t_header, -fix, 0)));
1968 return swab16(csum);
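/* bnx2x_csum_fix() above compensates for a checksum that was computed
 * starting "fix" bytes away from the transport header: for a positive
 * fix the partial sum over those leading bytes is subtracted, for a
 * negative fix the missing bytes are added back, and the result is
 * refolded and byte-swapped for the parsing BD.
 */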
1971 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
1975 if (skb->ip_summed != CHECKSUM_PARTIAL)
1979 if (vlan_get_protocol(skb) == htons(ETH_P_IPV6)) {
1981 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
1982 rc |= XMIT_CSUM_TCP;
1986 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
1987 rc |= XMIT_CSUM_TCP;
1991 if (skb_is_gso_v6(skb))
1992 rc |= XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6;
1993 else if (skb_is_gso(skb))
1994 rc |= XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP;
1999 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
2000 /* check if packet requires linearization (packet is too fragmented)
2001 no need to check fragmentation if page size > 8K (there will be no
2002 violation to FW restrictions) */
2003 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
2008 int first_bd_sz = 0;
2010 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
2011 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
2013 if (xmit_type & XMIT_GSO) {
2014 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
2015 /* Check if LSO packet needs to be copied:
2016 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
2017 int wnd_size = MAX_FETCH_BD - 3;
2018 /* Number of windows to check */
2019 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
2024 /* Headers length */
2025 hlen = (int)(skb_transport_header(skb) - skb->data) +
2028 /* Amount of data (w/o headers) on linear part of SKB*/
2029 first_bd_sz = skb_headlen(skb) - hlen;
2031 wnd_sum = first_bd_sz;
2033 /* Calculate the first sum - it's special */
2034 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
2036 skb_shinfo(skb)->frags[frag_idx].size;
2038 /* If there was data on linear skb data - check it */
2039 if (first_bd_sz > 0) {
2040 if (unlikely(wnd_sum < lso_mss)) {
2045 wnd_sum -= first_bd_sz;
2048 /* Others are easier: run through the frag list and
2049 check all windows */
2050 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
2052 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
2054 if (unlikely(wnd_sum < lso_mss)) {
2059 skb_shinfo(skb)->frags[wnd_idx].size;
2062 /* in the non-LSO case, a too-fragmented packet should always be linearized */
2069 if (unlikely(to_copy))
2070 DP(NETIF_MSG_TX_QUEUED,
2071 "Linearization IS REQUIRED for %s packet. "
2072 "num_frags %d hlen %d first_bd_sz %d\n",
2073 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
2074 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
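/* Example of the sliding-window test above (hypothetical sizes, with
 * wnd_size = MAX_FETCH_BD - 3): for an LSO skb with gso_size 1400 whose
 * linear part plus any wnd_size consecutive fragments sum to less than
 * 1400 bytes, FW could not build a full MSS from the BDs in that window,
 * so the function requests skb_linearize() before transmission.
 */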
2080 static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
2083 *parsing_data |= (skb_shinfo(skb)->gso_size <<
2084 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
2085 ETH_TX_PARSE_BD_E2_LSO_MSS;
2086 if ((xmit_type & XMIT_GSO_V6) &&
2087 (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
2088 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
2092 * bnx2x_set_pbd_gso - update PBD in GSO case.
2096 * @xmit_type: xmit flags
2098 static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
2099 struct eth_tx_parse_bd_e1x *pbd,
2102 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2103 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
2104 pbd->tcp_flags = pbd_tcp_flags(skb);
2106 if (xmit_type & XMIT_GSO_V4) {
2107 pbd->ip_id = swab16(ip_hdr(skb)->id);
2108 pbd->tcp_pseudo_csum =
2109 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
2111 0, IPPROTO_TCP, 0));
2114 pbd->tcp_pseudo_csum =
2115 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2116 &ipv6_hdr(skb)->daddr,
2117 0, IPPROTO_TCP, 0));
2119 pbd->global_data |= ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN;
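/* Note: the pseudo checksum above is computed with a zero length field
 * (csum_tcpudp_magic()/csum_ipv6_magic() are called with len == 0) and
 * the PSEUDO_CS_WITHOUT_LEN flag marks it as such for the firmware,
 * presumably so the per-segment length can be folded in by the chip.
 */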
2123 * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
2125 * @bp: driver handle
2127 * @parsing_data: data to be updated
2128 * @xmit_type: xmit flags
2132 static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
2133 u32 *parsing_data, u32 xmit_type)
2136 ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
2137 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) &
2138 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W;
2140 if (xmit_type & XMIT_CSUM_TCP) {
2141 *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
2142 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
2143 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
2145 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
2147 /* We support checksum offload for TCP and UDP only.
2148 * No need to pass the UDP header length - it's a constant.
2150 return skb_transport_header(skb) +
2151 sizeof(struct udphdr) - skb->data;
2154 static inline void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
2155 struct eth_tx_start_bd *tx_start_bd, u32 xmit_type)
2158 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
2160 if (xmit_type & XMIT_CSUM_V4)
2161 tx_start_bd->bd_flags.as_bitfield |=
2162 ETH_TX_BD_FLAGS_IP_CSUM;
2164 tx_start_bd->bd_flags.as_bitfield |=
2165 ETH_TX_BD_FLAGS_IPV6;
2167 if (!(xmit_type & XMIT_CSUM_TCP))
2168 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
2173 * bnx2x_set_pbd_csum - update PBD with checksum and return header length
2175 * @bp: driver handle
2177 * @pbd: parse BD to be updated
2178 * @xmit_type: xmit flags
2180 static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
2181 struct eth_tx_parse_bd_e1x *pbd,
2184 u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
2186 /* for now NS flag is not used in Linux */
2188 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
2189 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
2191 pbd->ip_hlen_w = (skb_transport_header(skb) -
2192 skb_network_header(skb)) >> 1;
2194 hlen += pbd->ip_hlen_w;
2196 /* We support checksum offload for TCP and UDP only */
2197 if (xmit_type & XMIT_CSUM_TCP)
2198 hlen += tcp_hdrlen(skb) / 2;
2200 hlen += sizeof(struct udphdr) / 2;
2202 pbd->total_hlen_w = cpu_to_le16(hlen);
2205 if (xmit_type & XMIT_CSUM_TCP) {
2206 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
2209 s8 fix = SKB_CS_OFF(skb); /* signed! */
2211 DP(NETIF_MSG_TX_QUEUED,
2212 "hlen %d fix %d csum before fix %x\n",
2213 le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
2215 /* HW bug: fixup the CSUM */
2216 pbd->tcp_pseudo_csum =
2217 bnx2x_csum_fix(skb_transport_header(skb),
2220 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
2221 pbd->tcp_pseudo_csum);
2227 /* called with netif_tx_lock
2228 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
2229 * netif_wake_queue()
2231 netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
2233 struct bnx2x *bp = netdev_priv(dev);
2234 struct bnx2x_fastpath *fp;
2235 struct netdev_queue *txq;
2236 struct sw_tx_bd *tx_buf;
2237 struct eth_tx_start_bd *tx_start_bd;
2238 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
2239 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
2240 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
2241 u32 pbd_e2_parsing_data = 0;
2242 u16 pkt_prod, bd_prod;
2245 u32 xmit_type = bnx2x_xmit_type(bp, skb);
2248 __le16 pkt_size = 0;
2250 u8 mac_type = UNICAST_ADDRESS;
2252 #ifdef BNX2X_STOP_ON_ERROR
2253 if (unlikely(bp->panic))
2254 return NETDEV_TX_BUSY;
2257 fp_index = skb_get_queue_mapping(skb);
2258 txq = netdev_get_tx_queue(dev, fp_index);
2260 fp = &bp->fp[fp_index];
2262 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
2263 fp->eth_q_stats.driver_xoff++;
2264 netif_tx_stop_queue(txq);
2265 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
2266 return NETDEV_TX_BUSY;
2269 DP(NETIF_MSG_TX_QUEUED, "queue[%d]: SKB: summed %x protocol %x "
2270 "protocol(%x,%x) gso type %x xmit_type %x\n",
2271 fp_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
2272 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
2274 eth = (struct ethhdr *)skb->data;
2276 /* set flag according to packet type (UNICAST_ADDRESS is default)*/
2277 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
2278 if (is_broadcast_ether_addr(eth->h_dest))
2279 mac_type = BROADCAST_ADDRESS;
2281 mac_type = MULTICAST_ADDRESS;
2284 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
2285 /* First, check if we need to linearize the skb (due to FW
2286 restrictions). No need to check fragmentation if page size > 8K
2287 (there will be no violation to FW restrictions) */
2288 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
2289 /* Statistics of linearization */
2291 if (skb_linearize(skb) != 0) {
2292 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
2293 "silently dropping this SKB\n");
2294 dev_kfree_skb_any(skb);
2295 return NETDEV_TX_OK;
2301 Please read carefully. First we use one BD which we mark as start,
2302 then we have a parsing info BD (used for TSO or xsum),
2303 and only then we have the rest of the TSO BDs.
2304 (don't forget to mark the last one as last,
2305 and to unmap only AFTER you write to the BD ...)
2306 And above all, all PBD sizes are in words - NOT DWORDS!
2309 pkt_prod = fp->tx_pkt_prod++;
2310 bd_prod = TX_BD(fp->tx_bd_prod);
2312 /* get a tx_buf and first BD */
2313 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
2314 tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
2316 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
2317 SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_ETH_ADDR_TYPE,
2321 SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1);
2323 /* remember the first BD of the packet */
2324 tx_buf->first_bd = fp->tx_bd_prod;
2328 DP(NETIF_MSG_TX_QUEUED,
2329 "sending pkt %u @%p next_idx %u bd %u @%p\n",
2330 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
2332 if (vlan_tx_tag_present(skb)) {
2333 tx_start_bd->vlan_or_ethertype =
2334 cpu_to_le16(vlan_tx_tag_get(skb));
2335 tx_start_bd->bd_flags.as_bitfield |=
2336 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
2338 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
2340 /* turn on parsing and get a BD */
2341 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2343 if (xmit_type & XMIT_CSUM)
2344 bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
2346 if (CHIP_IS_E2(bp)) {
2347 pbd_e2 = &fp->tx_desc_ring[bd_prod].parse_bd_e2;
2348 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
2349 /* Set PBD in checksum offload case */
2350 if (xmit_type & XMIT_CSUM)
2351 hlen = bnx2x_set_pbd_csum_e2(bp, skb,
2352 &pbd_e2_parsing_data,
2355 pbd_e1x = &fp->tx_desc_ring[bd_prod].parse_bd_e1x;
2356 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
2357 /* Set PBD in checksum offload case */
2358 if (xmit_type & XMIT_CSUM)
2359 hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
2363 /* Map skb linear data for DMA */
2364 mapping = dma_map_single(&bp->pdev->dev, skb->data,
2365 skb_headlen(skb), DMA_TO_DEVICE);
2367 /* Setup the data pointer of the first BD of the packet */
2368 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2369 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
2370 nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
2371 tx_start_bd->nbd = cpu_to_le16(nbd);
2372 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
2373 pkt_size = tx_start_bd->nbytes;
2375 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
2376 " nbytes %d flags %x vlan %x\n",
2377 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
2378 le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
2379 tx_start_bd->bd_flags.as_bitfield,
2380 le16_to_cpu(tx_start_bd->vlan_or_ethertype));
2382 if (xmit_type & XMIT_GSO) {
2384 DP(NETIF_MSG_TX_QUEUED,
2385 "TSO packet len %d hlen %d total len %d tso size %d\n",
2386 skb->len, hlen, skb_headlen(skb),
2387 skb_shinfo(skb)->gso_size);
2389 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
2391 if (unlikely(skb_headlen(skb) > hlen))
2392 bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
2393 hlen, bd_prod, ++nbd);
2395 bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
2398 bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
2401 /* Set the PBD's parsing_data field if not zero
2402 * (for the chips newer than 57711).
2404 if (pbd_e2_parsing_data)
2405 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
	tx_data_bd = (struct eth_tx_bd *)tx_start_bd;

	/* Handle fragmented skb */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
		tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
		if (total_pkt_bd == NULL)
			total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;

		mapping = dma_map_page(&bp->pdev->dev, frag->page,
				       frag->page_offset,
				       frag->size, DMA_TO_DEVICE);

		tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
		tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
		tx_data_bd->nbytes = cpu_to_le16(frag->size);
		le16_add_cpu(&pkt_size, frag->size);

		DP(NETIF_MSG_TX_QUEUED,
		   "frag %d bd @%p addr (%x:%x) nbytes %d\n",
		   i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
		   le16_to_cpu(tx_data_bd->nbytes));
	}

	DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));

	/* now send a tx doorbell, counting the next BD
	 * if the packet contains or ends with it
	 */
	if (TX_BD_POFF(bd_prod) < nbd)
		nbd++;

	if (total_pkt_bd != NULL)
		total_pkt_bd->total_pkt_bytes = pkt_size;

	if (pbd_e1x)
		DP(NETIF_MSG_TX_QUEUED,
		   "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
		   " tcp_flags %x xsum %x seq %u hlen %u\n",
		   pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
		   pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
		   pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
		   le16_to_cpu(pbd_e1x->total_hlen_w));
	if (pbd_e2)
		DP(NETIF_MSG_TX_QUEUED,
		   "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
		   pbd_e2, pbd_e2->dst_mac_addr_hi, pbd_e2->dst_mac_addr_mid,
		   pbd_e2->dst_mac_addr_lo, pbd_e2->src_mac_addr_hi,
		   pbd_e2->src_mac_addr_mid, pbd_e2->src_mac_addr_lo,
		   pbd_e2->parsing_data);
	DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);

	/*
	 * Make sure that the BD data is updated before updating the producer
	 * since FW might read the BD right after the producer is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since FW will
	 * assume packets must have BDs.
	 */
	wmb();

	fp->tx_db.data.prod += nbd;
	barrier();

	DOORBELL(bp, fp->cid, fp->tx_db.raw);

	mmiowb();

	fp->tx_bd_prod += nbd;

	if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
		netif_tx_stop_queue(txq);

		/* paired memory barrier is in bnx2x_tx_int(), we have to keep
		 * ordering of set_bit() in netif_tx_stop_queue() and read of
		 * fp->tx_bd_cons */
		smp_mb();

		fp->eth_q_stats.driver_xoff++;
		if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
			netif_tx_wake_queue(txq);
	}
	fp->tx_pkt++;

	return NETDEV_TX_OK;
}
/* called with rtnl_lock */
int bnx2x_change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct bnx2x *bp = netdev_priv(dev);

	if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	if (netif_running(dev))
		bnx2x_set_eth_mac(bp, 1);

	return 0;
}
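
/**
 * bnx2x_free_fp_mem_at - free the memory of a single fastpath queue.
 *
 * @bp:		driver handle
 * @fp_index:	fastpath index whose memory is released
 *
 * Frees the queue's status block (the FCoE queue uses the default SB, so
 * only its pointers are cleared), the Rx buffer/descriptor/completion/SGE
 * rings and the Tx rings allocated by bnx2x_alloc_fp_mem_at().
 */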
static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
{
	union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
	struct bnx2x_fastpath *fp = &bp->fp[fp_index];

	/* Common */
#ifdef BCM_CNIC
	if (IS_FCOE_IDX(fp_index)) {
		memset(sb, 0, sizeof(union host_hc_status_block));
		fp->status_blk_mapping = 0;
	} else {
#endif
		/* status blocks */
		if (CHIP_IS_E2(bp))
			BNX2X_PCI_FREE(sb->e2_sb,
				       bnx2x_fp(bp, fp_index,
						status_blk_mapping),
				       sizeof(struct host_hc_status_block_e2));
		else
			BNX2X_PCI_FREE(sb->e1x_sb,
				       bnx2x_fp(bp, fp_index,
						status_blk_mapping),
				       sizeof(struct host_hc_status_block_e1x));
#ifdef BCM_CNIC
	}
#endif

	/* Rx */
	if (!skip_rx_queue(bp, fp_index)) {
		bnx2x_free_rx_bds(fp);

		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
			       bnx2x_fp(bp, fp_index, rx_desc_mapping),
			       sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
			       bnx2x_fp(bp, fp_index, rx_comp_mapping),
			       sizeof(struct eth_fast_path_rx_cqe) *
			       NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
			       bnx2x_fp(bp, fp_index, rx_sge_mapping),
			       BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}

	/* Tx */
	if (!skip_tx_queue(bp, fp_index)) {
		/* fastpath tx rings: tx_buf tx_desc */
		BNX2X_FREE(bnx2x_fp(bp, fp_index, tx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, tx_desc_ring),
			       bnx2x_fp(bp, fp_index, tx_desc_mapping),
			       sizeof(union eth_tx_bd_types) * NUM_TX_BD);
	}
	/* end of fastpath */
}
void bnx2x_free_fp_mem(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i)
		bnx2x_free_fp_mem_at(bp, i);
}
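
/* Cache pointers to the index_values and running_index arrays of the
 * queue's status block in the fastpath structure; the E2 and E1x status
 * blocks have different layouts.
 */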
static inline void set_sb_shortcuts(struct bnx2x *bp, int index)
{
	union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
	if (CHIP_IS_E2(bp)) {
		bnx2x_fp(bp, index, sb_index_values) =
			(__le16 *)status_blk.e2_sb->sb.index_values;
		bnx2x_fp(bp, index, sb_running_index) =
			(__le16 *)status_blk.e2_sb->sb.running_index;
	} else {
		bnx2x_fp(bp, index, sb_index_values) =
			(__le16 *)status_blk.e1x_sb->sb.index_values;
		bnx2x_fp(bp, index, sb_running_index) =
			(__le16 *)status_blk.e1x_sb->sb.running_index;
	}
}
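
/**
 * bnx2x_alloc_fp_mem_at - allocate the memory of a single fastpath queue.
 *
 * @bp:		driver handle
 * @index:	fastpath index to allocate for
 *
 * Allocates the status block (skipped for the FCoE queue, which uses the
 * default SB), the Tx rings and the Rx rings. If fewer Rx BDs than
 * requested could be allocated, the queue is kept as long as the FW
 * minimum is met; otherwise its memory is released and -ENOMEM returned.
 */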
static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
{
	union host_hc_status_block *sb;
	struct bnx2x_fastpath *fp = &bp->fp[index];
	int ring_size = 0;

	/* if rx_ring_size specified - use it */
	int rx_ring_size = bp->rx_ring_size ? bp->rx_ring_size :
			   MAX_RX_AVAIL/bp->num_queues;

	/* allocate at least number of buffers required by FW */
	rx_ring_size = max_t(int, fp->disable_tpa ? MIN_RX_SIZE_NONTPA :
						    MIN_RX_SIZE_TPA,
			     rx_ring_size);

	bnx2x_fp(bp, index, bp) = bp;
	bnx2x_fp(bp, index, index) = index;

	/* Common */
	sb = &bnx2x_fp(bp, index, status_blk);
#ifdef BCM_CNIC
	if (!IS_FCOE_IDX(index)) {
#endif
		/* status blocks */
		if (CHIP_IS_E2(bp))
			BNX2X_PCI_ALLOC(sb->e2_sb,
				&bnx2x_fp(bp, index, status_blk_mapping),
				sizeof(struct host_hc_status_block_e2));
		else
			BNX2X_PCI_ALLOC(sb->e1x_sb,
				&bnx2x_fp(bp, index, status_blk_mapping),
				sizeof(struct host_hc_status_block_e1x));
#ifdef BCM_CNIC
	}
#endif

	/* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
	 * set shortcuts for it.
	 */
	if (!IS_FCOE_IDX(index))
		set_sb_shortcuts(bp, index);

	/* Tx */
	if (!skip_tx_queue(bp, index)) {
		/* fastpath tx rings: tx_buf tx_desc */
		BNX2X_ALLOC(bnx2x_fp(bp, index, tx_buf_ring),
			    sizeof(struct sw_tx_bd) * NUM_TX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, tx_desc_ring),
				&bnx2x_fp(bp, index, tx_desc_mapping),
				sizeof(union eth_tx_bd_types) * NUM_TX_BD);
	}

	/* Rx */
	if (!skip_rx_queue(bp, index)) {
		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_ALLOC(bnx2x_fp(bp, index, rx_buf_ring),
			    sizeof(struct sw_rx_bd) * NUM_RX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_desc_ring),
				&bnx2x_fp(bp, index, rx_desc_mapping),
				sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_comp_ring),
				&bnx2x_fp(bp, index, rx_comp_mapping),
				sizeof(struct eth_fast_path_rx_cqe) *
				NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring),
			    sizeof(struct sw_rx_page) * NUM_RX_SGE);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_sge_ring),
				&bnx2x_fp(bp, index, rx_sge_mapping),
				BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);

		/* RX BD ring */
		bnx2x_set_next_page_rx_bd(fp);

		/* CQ ring */
		bnx2x_set_next_page_rx_cq(fp);

		/* BDs */
		ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
		if (ring_size < rx_ring_size)
			goto alloc_mem_err;
	}

	return 0;

/* handles low memory cases */
alloc_mem_err:
	BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
		  index, ring_size);
	/* FW will drop all packets if queue is not big enough,
	 * In these cases we disable the queue
	 * Min size is different for TPA and non-TPA queues
	 */
	if (ring_size < (fp->disable_tpa ?
			 MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
		/* release memory allocated for this queue */
		bnx2x_free_fp_mem_at(bp, index);
		return -ENOMEM;
	}
	return 0;
}
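
/**
 * bnx2x_alloc_fp_mem - allocate fastpath memory for all queues.
 *
 * @bp:		driver handle
 *
 * A failure on the leading (or FCoE) queue is fatal; a failure on an RSS
 * queue only reduces the number of active queues.
 */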
int bnx2x_alloc_fp_mem(struct bnx2x *bp)
{
	int i;

	/**
	 * 1. Allocate FP for leading - fatal if error
	 * 2. {CNIC} Allocate FCoE FP - fatal if error
	 * 3. Allocate RSS - fix number of queues if error
	 */

	/* leading */
	if (bnx2x_alloc_fp_mem_at(bp, 0))
		return -ENOMEM;
#ifdef BCM_CNIC
	/* FCoE */
	if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX))
		/* we will fail load process instead of mark
		 * NO_FCOE_FLAG
		 */
		return -ENOMEM;
#endif
	/* RSS */
	for_each_nondefault_eth_queue(bp, i)
		if (bnx2x_alloc_fp_mem_at(bp, i))
			break;

	/* handle memory failures */
	if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
		int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;

#ifdef BCM_CNIC
		/**
		 * move non eth FPs next to last eth FP
		 * must be done in that order
		 * FCOE_IDX < FWD_IDX < OOO_IDX
		 */

		/* move FCoE fp */
		bnx2x_move_fp(bp, FCOE_IDX, FCOE_IDX - delta);
#endif
		bp->num_queues -= delta;
		BNX2X_ERR("Adjusted num of queues from %d to %d\n",
			  bp->num_queues + delta, bp->num_queues);
	}

	return 0;
}
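
/* Request IRQs according to the current interrupt mode: one vector per
 * queue for MSI-X, otherwise a single MSI or INTx interrupt.
 */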
static int bnx2x_setup_irqs(struct bnx2x *bp)
{
	int rc = 0;
	if (bp->flags & USING_MSIX_FLAG) {
		rc = bnx2x_req_msix_irqs(bp);
		if (rc)
			return rc;
	} else {
		rc = bnx2x_req_irq(bp);
		if (rc) {
			BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
			return rc;
		}
		if (bp->flags & USING_MSI_FLAG) {
			bp->dev->irq = bp->pdev->irq;
			netdev_info(bp->dev, "using MSI IRQ %d\n",
				    bp->pdev->irq);
		}
	}

	return 0;
}
void bnx2x_free_mem_bp(struct bnx2x *bp)
{
	kfree(bp->fp);
	kfree(bp->msix_table);
	kfree(bp->ilt);
}
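
/* Allocate the bp-level arrays sized by the L2 CID count: the fastpath
 * array, the MSI-X table and the ILT. On failure whatever was already
 * allocated is released via bnx2x_free_mem_bp().
 */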
int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
{
	struct bnx2x_fastpath *fp;
	struct msix_entry *tbl;
	struct bnx2x_ilt *ilt;

	/* fp array */
	fp = kzalloc(L2_FP_COUNT(bp->l2_cid_count)*sizeof(*fp), GFP_KERNEL);
	if (!fp)
		goto alloc_err;
	bp->fp = fp;

	/* msix table */
	tbl = kzalloc((FP_SB_COUNT(bp->l2_cid_count) + 1) * sizeof(*tbl),
		      GFP_KERNEL);
	if (!tbl)
		goto alloc_err;
	bp->msix_table = tbl;

	/* ilt */
	ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
	if (!ilt)
		goto alloc_err;
	bp->ilt = ilt;

	return 0;
alloc_err:
	bnx2x_free_mem_bp(bp);
	return -ENOMEM;
}
static int bnx2x_reload_if_running(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (unlikely(!netif_running(dev)))
		return 0;

	bnx2x_nic_unload(bp, UNLOAD_NORMAL);
	return bnx2x_nic_load(bp, LOAD_NORMAL);
}
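
/* Return the index of the PHY that currently owns the link: the internal
 * PHY when only one PHY is populated, otherwise EXT_PHY1/EXT_PHY2 according
 * to the link state or the configured PHY selection.
 */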
int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
{
	u32 sel_phy_idx = 0;
	if (bp->link_params.num_phys <= 1)
		return INT_PHY;

	if (bp->link_vars.link_up) {
		sel_phy_idx = EXT_PHY1;
		/* In case link is SERDES, check if the EXT_PHY2 is the one */
		if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
		    (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
			sel_phy_idx = EXT_PHY2;
	} else {
		switch (bnx2x_phy_selection(&bp->link_params)) {
		case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
		case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
		case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
			sel_phy_idx = EXT_PHY1;
			break;
		case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
		case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
			sel_phy_idx = EXT_PHY2;
			break;
		}
	}

	return sel_phy_idx;
}
int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
{
	u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
	/*
	 * The selected active PHY is always after swapping (in case PHY
	 * swapping is enabled). So when swapping is enabled, we need to
	 * reverse the configuration.
	 */

	if (bp->link_params.multi_phy_config &
	    PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
		if (sel_phy_idx == EXT_PHY1)
			sel_phy_idx = EXT_PHY2;
		else if (sel_phy_idx == EXT_PHY2)
			sel_phy_idx = EXT_PHY1;
	}
	return LINK_CONFIG_IDX(sel_phy_idx);
}
/* called with rtnl_lock */
int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		printk(KERN_ERR "Handling parity error recovery. Try again later\n");
		return -EAGAIN;
	}

	if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
	    ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
		return -EINVAL;

	/* This does not race with packet allocation
	 * because the actual alloc size is
	 * only updated as part of load
	 */
	dev->mtu = new_mtu;

	return bnx2x_reload_if_running(dev);
}
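
/* ndo_fix_features hook: LRO is implemented via TPA, which requires Rx
 * checksum offloading, so drop LRO whenever RXCSUM is off or TPA is
 * disabled.
 */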
u32 bnx2x_fix_features(struct net_device *dev, u32 features)
{
	struct bnx2x *bp = netdev_priv(dev);

	/* TPA requires Rx CSUM offloading */
	if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa)
		features &= ~NETIF_F_LRO;

	return features;
}
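
/* ndo_set_features hook: map NETIF_F_LRO onto the TPA flag and
 * NETIF_F_LOOPBACK onto BMAC loopback mode, reloading the NIC if anything
 * changed (deferred until recovery completes if one is in progress).
 */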
int bnx2x_set_features(struct net_device *dev, u32 features)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 flags = bp->flags;
	bool bnx2x_reload = false;

	if (features & NETIF_F_LRO)
		flags |= TPA_ENABLE_FLAG;
	else
		flags &= ~TPA_ENABLE_FLAG;

	if (features & NETIF_F_LOOPBACK) {
		if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
			bp->link_params.loopback_mode = LOOPBACK_BMAC;
			bnx2x_reload = true;
		}
	} else {
		if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
			bp->link_params.loopback_mode = LOOPBACK_NONE;
			bnx2x_reload = true;
		}
	}

	if (flags ^ bp->flags) {
		bp->flags = flags;
		bnx2x_reload = true;
	}

	if (bnx2x_reload) {
		if (bp->recovery_state == BNX2X_RECOVERY_DONE)
			return bnx2x_reload_if_running(dev);
		/* else: bnx2x_nic_load() will be called at end of recovery */
	}

	return 0;
}
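
/* netdev watchdog (Tx timeout) handler */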
void bnx2x_tx_timeout(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

#ifdef BNX2X_STOP_ON_ERROR
	if (!bp->panic)
		bnx2x_panic();
#endif
	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_delayed_work(&bp->reset_task, 0);
}
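
/* PCI power-management suspend hook: detach the netdev, unload the NIC and
 * move the device to the requested power state.
 */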
int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_save_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	netif_device_detach(dev);

	bnx2x_nic_unload(bp, UNLOAD_CLOSE);

	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

	rtnl_unlock();

	return 0;
}
int bnx2x_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;
	int rc;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		printk(KERN_ERR "Handling parity error recovery. Try again later\n");
		return -EAGAIN;
	}

	rtnl_lock();

	pci_restore_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);

	/* Since the chip was reset, clear the FW sequence number */
	bp->fw_seq = 0;
	rc = bnx2x_nic_load(bp, LOAD_OPEN);