/* bnx2x_cmn.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2010 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 */

#include <linux/etherdevice.h>
#include <net/ip6_checksum.h>
#include <linux/firmware.h>
#include "bnx2x_cmn.h"
#include <linux/if_vlan.h>
#include "bnx2x_init.h"
/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                             u16 idx)
{
    struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
    struct eth_tx_start_bd *tx_start_bd;
    struct eth_tx_bd *tx_data_bd;
    struct sk_buff *skb = tx_buf->skb;
    u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
    int nbd;

    /* prefetch skb end pointer to speed up dev_kfree_skb() */
    prefetch(&skb->end);

    DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
       idx, tx_buf, skb);

    /* unmap first bd */
    DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
    tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
    dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
                     BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);

    nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
    if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
        BNX2X_ERR("BAD nbd!\n");
        bnx2x_panic();
    }
#endif
    new_cons = nbd + tx_buf->first_bd;

    /* Get the next bd */
    bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

    /* Skip a parse bd... */
    --nbd;
    bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

    /* ...and the TSO split header bd since they have no mapping */
    if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
        --nbd;
        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
    }

    /* now free frags */
    while (nbd > 0) {
        DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
        tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
        dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
                       BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
        if (--nbd)
            bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
    }

    /* release skb */
    WARN_ON(!skb);
    dev_kfree_skb(skb);
    tx_buf->first_bd = 0;
    tx_buf->skb = NULL;

    return new_cons;
}
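/* Worked example of the nbd accounting above (illustrative, not from the
 * original sources): a two-frag TSO packet whose header was split is laid
 * out as start BD + parse BD + split-header BD + 2 data BDs, so nbd = 5.
 * The free path unmaps the start BD, skips the parse BD and the
 * split-header BD (neither owns a DMA mapping of its own), then unmaps
 * the remaining data BDs one by one until nbd is exhausted.
 */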
int bnx2x_tx_int(struct bnx2x_fastpath *fp)
{
    struct bnx2x *bp = fp->bp;
    struct netdev_queue *txq;
    u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;

#ifdef BNX2X_STOP_ON_ERROR
    if (unlikely(bp->panic))
        return -1;
#endif

    txq = netdev_get_tx_queue(bp->dev, fp->index);
    hw_cons = le16_to_cpu(*fp->tx_cons_sb);
    sw_cons = fp->tx_pkt_cons;

    while (sw_cons != hw_cons) {
        u16 pkt_cons;

        pkt_cons = TX_BD(sw_cons);

        DP(NETIF_MSG_TX_DONE, "queue[%d]: hw_cons %u sw_cons %u "
           "pkt_cons %u\n",
           fp->index, hw_cons, sw_cons, pkt_cons);

        bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
        sw_cons++;
    }

    fp->tx_pkt_cons = sw_cons;
    fp->tx_bd_cons = bd_cons;

    /* Need to make the tx_bd_cons update visible to start_xmit()
     * before checking for netif_tx_queue_stopped(). Without the
     * memory barrier, there is a small possibility that
     * start_xmit() will miss it and cause the queue to be stopped
     * forever.
     */
    smp_mb();

    /* TBD need a thresh? */
    if (unlikely(netif_tx_queue_stopped(txq))) {
        /* Taking tx_lock() is needed to prevent re-enabling the queue
         * while it's empty. This could have happened if rx_action() gets
         * suspended in bnx2x_tx_int() after the condition before
         * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
         *
         * stops the queue->sees fresh tx_bd_cons->releases the queue->
         * sends some packets consuming the whole queue again->
         * stops the queue
         */
        __netif_tx_lock(txq, smp_processor_id());

        if ((netif_tx_queue_stopped(txq)) &&
            (bp->state == BNX2X_STATE_OPEN) &&
            (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
            netif_tx_wake_queue(txq);

        __netif_tx_unlock(txq);
    }
    return 0;
}
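/* Illustrative timeline of the race closed by the tx_lock above (a sketch,
 * not from the original sources):
 *
 *   bnx2x_start_xmit()                 bnx2x_tx_int()
 *   ring full -> stop queue
 *                                      frees BDs, updates tx_bd_cons,
 *                                      sees queue stopped
 *   sees fresh tx_bd_cons -> wake
 *   fills the ring -> stop queue
 *                                      wakes the (again full) queue
 *
 * Re-checking bnx2x_tx_avail() under __netif_tx_lock() makes the final
 * wake-up impossible.
 */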
static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
                                             u16 idx)
{
    u16 last_max = fp->last_max_sge;

    if (SUB_S16(idx, last_max) > 0)
        fp->last_max_sge = idx;
}
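/* Example of the signed-difference compare above (illustrative): SUB_S16()
 * subtracts with 16-bit wraparound, so idx = 2 and last_max = 65534 give
 * SUB_S16(2, 65534) = 4 > 0, and idx is correctly treated as the newer
 * index even though it is numerically smaller.
 */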
static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
                                  struct eth_fast_path_rx_cqe *fp_cqe)
{
    struct bnx2x *bp = fp->bp;
    u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
                                 le16_to_cpu(fp_cqe->len_on_bd)) >>
                  SGE_PAGE_SHIFT;
    u16 last_max, last_elem, first_elem;
    u16 delta = 0;
    u16 i;

    if (!sge_len)
        return;

    /* First mark all used pages */
    for (i = 0; i < sge_len; i++)
        SGE_MASK_CLEAR_BIT(fp,
            RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[i])));

    DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
       sge_len - 1, le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));

    /* Here we assume that the last SGE index is the biggest */
    prefetch((void *)(fp->sge_mask));
    bnx2x_update_last_max_sge(fp,
        le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));

    last_max = RX_SGE(fp->last_max_sge);
    last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
    first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

    /* If ring is not full */
    if (last_elem + 1 != first_elem)
        last_elem++;

    /* Now update the prod */
    for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
        if (likely(fp->sge_mask[i]))
            break;

        fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
        delta += RX_SGE_MASK_ELEM_SZ;
    }

    if (delta > 0) {
        fp->rx_sge_prod += delta;
        /* clear page-end entries */
        bnx2x_clear_sge_mask_next_elems(fp);
    }

    DP(NETIF_MSG_RX_STATUS,
       "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
       fp->last_max_sge, fp->rx_sge_prod);
}
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
                            struct sk_buff *skb, u16 cons, u16 prod)
{
    struct bnx2x *bp = fp->bp;
    struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
    struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
    struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
    dma_addr_t mapping;

    /* move empty skb from pool to prod and map it */
    prod_rx_buf->skb = fp->tpa_pool[queue].skb;
    mapping = dma_map_single(&bp->pdev->dev, fp->tpa_pool[queue].skb->data,
                             bp->rx_buf_size, DMA_FROM_DEVICE);
    dma_unmap_addr_set(prod_rx_buf, mapping, mapping);

    /* move partial skb from cons to pool (don't unmap yet) */
    fp->tpa_pool[queue] = *cons_rx_buf;

    /* mark bin state as start - print error if current state != stop */
    if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
        BNX2X_ERR("start of bin not in stop [%d]\n", queue);

    fp->tpa_state[queue] = BNX2X_TPA_START;

    /* point prod_bd to new skb */
    prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
    prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
    fp->tpa_queue_used |= (1 << queue);
#ifdef _ASM_GENERIC_INT_L64_H
    DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
       fp->tpa_queue_used);
#else
    DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
       fp->tpa_queue_used);
#endif
#endif
}
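/* TPA bin lifecycle (summary sketch of the driver's usage, not taken
 * verbatim from the original sources):
 *   BNX2X_TPA_STOP --bnx2x_tpa_start()--> BNX2X_TPA_START
 *     the empty skb parked in tpa_pool[queue] takes the producer slot,
 *     while the partially filled skb from the consumer slot is parked
 *     in the pool, still DMA-mapped, until the aggregation ends;
 *   BNX2X_TPA_START --bnx2x_tpa_stop()--> BNX2X_TPA_STOP
 *     the parked skb is unmapped, completed with SGE pages by
 *     bnx2x_fill_frag_skb() and handed to the stack, and a freshly
 *     allocated skb refills the bin.
 */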
static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                               struct sk_buff *skb,
                               struct eth_fast_path_rx_cqe *fp_cqe,
                               u16 cqe_idx)
{
    struct sw_rx_page *rx_pg, old_rx_pg;
    u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
    u32 i, frag_len, frag_size, pages;
    int err;
    int j;

    frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
    pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

    /* This is needed in order to enable forwarding support */
    if (frag_size)
        skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
                                        max(frag_size, (u32)len_on_bd));
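    /* Illustrative numbers for the gso_size clamp above (an example, not
     * from the original sources): with pkt_len = 9000, len_on_bd = 100 and
     * SGE_PAGE_SIZE = 4096, frag_size = 8900 and gso_size becomes
     * min(4096, max(8900, 100)) = 4096, so the stack can later re-segment
     * the aggregated frame in gso_size chunks when forwarding it.
     */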
#ifdef BNX2X_STOP_ON_ERROR
    if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
        BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
                  pages, cqe_idx);
        BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
                  fp_cqe->pkt_len, len_on_bd);
        bnx2x_panic();
        return -EINVAL;
    }
#endif

    /* Run through the SGL and compose the fragmented skb */
    for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
        u16 sge_idx =
            RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[j]));

        /* FW gives the indices of the SGE as if the ring is an array
           (meaning that the "next" element will consume 2 indices) */
        frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
        rx_pg = &fp->rx_page_ring[sge_idx];
        old_rx_pg = *rx_pg;

        /* If we fail to allocate a substitute page, we simply stop
           where we are and drop the whole packet */
        err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
        if (unlikely(err)) {
            fp->eth_q_stats.rx_skb_alloc_failed++;
            return err;
        }

        /* Unmap the page as we are going to pass it to the stack */
        dma_unmap_page(&bp->pdev->dev,
                       dma_unmap_addr(&old_rx_pg, mapping),
                       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);

        /* Add one frag and update the appropriate fields in the skb */
        skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

        skb->data_len += frag_len;
        skb->truesize += frag_len;
        skb->len += frag_len;

        frag_size -= frag_len;
    }

    return 0;
}
static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                           u16 queue, int pad, int len, union eth_rx_cqe *cqe,
                           u16 cqe_idx)
{
    struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
    struct sk_buff *skb = rx_buf->skb;
    struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

    /* Unmap skb in the pool anyway, as we are going to change
       pool entry status to BNX2X_TPA_STOP even if new skb allocation
       fails. */
    dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
                     bp->rx_buf_size, DMA_FROM_DEVICE);

    if (likely(new_skb)) {
        /* fix ip xsum and give it to the stack */
        /* (no need to map the new skb) */
        int is_vlan_cqe =
            (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
             PARSING_FLAGS_VLAN);
        int is_not_hwaccel_vlan_cqe =
            (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));

        prefetch(((char *)(skb)) + L1_CACHE_BYTES);

#ifdef BNX2X_STOP_ON_ERROR
        if (pad + len > bp->rx_buf_size) {
            BNX2X_ERR("skb_put is about to fail... "
                      "pad %d len %d rx_buf_size %d\n",
                      pad, len, bp->rx_buf_size);
            bnx2x_panic();
            return;
        }
#endif

        skb_reserve(skb, pad);
        skb_put(skb, len);

        skb->protocol = eth_type_trans(skb, bp->dev);
        skb->ip_summed = CHECKSUM_UNNECESSARY;

        {
            struct iphdr *iph;

            iph = (struct iphdr *)skb->data;

            /* If there is no Rx VLAN offloading -
               take VLAN tag into an account */
            if (unlikely(is_not_hwaccel_vlan_cqe))
                iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);

            iph->check = 0;
            iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
        }

        if (!bnx2x_fill_frag_skb(bp, fp, skb,
                                 &cqe->fast_path_cqe, cqe_idx)) {
            if ((bp->vlgrp != NULL) &&
                (le16_to_cpu(cqe->fast_path_cqe.
                 pars_flags.flags) & PARSING_FLAGS_VLAN))
                vlan_gro_receive(&fp->napi, bp->vlgrp,
                                 le16_to_cpu(cqe->fast_path_cqe.
                                             vlan_tag), skb);
            else
                napi_gro_receive(&fp->napi, skb);
        } else {
            DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
               " - dropping packet!\n");
            dev_kfree_skb(skb);
        }

        /* put new skb in bin */
        fp->tpa_pool[queue].skb = new_skb;

    } else {
        /* else drop the packet and keep the buffer in the bin */
        DP(NETIF_MSG_RX_STATUS,
           "Failed to allocate new skb - dropping packet!\n");
        fp->eth_q_stats.rx_skb_alloc_failed++;
    }

    fp->tpa_state[queue] = BNX2X_TPA_STOP;
}
/* Set Toeplitz hash value in the skb using the value from the
 * CQE (calculated by HW).
 */
static inline void bnx2x_set_skb_rxhash(struct bnx2x *bp, union eth_rx_cqe *cqe,
                                        struct sk_buff *skb)
{
    /* Set Toeplitz hash from CQE */
    if ((bp->dev->features & NETIF_F_RXHASH) &&
        (cqe->fast_path_cqe.status_flags &
         ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
        skb->rxhash =
            le32_to_cpu(cqe->fast_path_cqe.rss_hash_result);
}
int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
    struct bnx2x *bp = fp->bp;
    u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
    u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
    int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
    if (unlikely(bp->panic))
        return 0;
#endif

    /* CQ "next element" is of the size of the regular element,
       that's why it's ok here */
    hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
    if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
        hw_comp_cons++;

    bd_cons = fp->rx_bd_cons;
    bd_prod = fp->rx_bd_prod;
    bd_prod_fw = bd_prod;
    sw_comp_cons = fp->rx_comp_cons;
    sw_comp_prod = fp->rx_comp_prod;

    /* Memory barrier necessary as speculative reads of the rx
     * buffer can be ahead of the index in the status block
     */
    rmb();

    DP(NETIF_MSG_RX_STATUS,
       "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
       fp->index, hw_comp_cons, sw_comp_cons);
    while (sw_comp_cons != hw_comp_cons) {
        struct sw_rx_bd *rx_buf = NULL;
        struct sk_buff *skb;
        union eth_rx_cqe *cqe;
        u8 cqe_fp_flags;
        u16 len, pad;

        comp_ring_cons = RCQ_BD(sw_comp_cons);
        bd_prod = RX_BD(bd_prod);
        bd_cons = RX_BD(bd_cons);

        /* Prefetch the page containing the BD descriptor
           at producer's index. It will be needed when new skb is
           allocated */
        prefetch((void *)(PAGE_ALIGN((unsigned long)
                                     (&fp->rx_desc_ring[bd_prod])) -
                          PAGE_SIZE + 1));

        cqe = &fp->rx_comp_ring[comp_ring_cons];
        cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

        DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
           " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
           cqe_fp_flags, cqe->fast_path_cqe.status_flags,
           le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
           le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
           le16_to_cpu(cqe->fast_path_cqe.pkt_len));

        /* is this a slowpath msg? */
        if (unlikely(CQE_TYPE(cqe_fp_flags))) {
            bnx2x_sp_event(fp, cqe);
            goto next_cqe;

        /* this is an rx packet */
        } else {
            rx_buf = &fp->rx_buf_ring[bd_cons];
            skb = rx_buf->skb;
            prefetch(skb);

            len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
            pad = cqe->fast_path_cqe.placement_offset;

            /* If CQE is marked both TPA_START and TPA_END
               it is a non-TPA CQE */
            if ((!fp->disable_tpa) &&
                (TPA_TYPE(cqe_fp_flags) !=
                 (TPA_TYPE_START | TPA_TYPE_END))) {
                u16 queue = cqe->fast_path_cqe.queue_index;

                if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
                    DP(NETIF_MSG_RX_STATUS,
                       "calling tpa_start on queue %d\n",
                       queue);

                    bnx2x_tpa_start(fp, queue, skb,
                                    bd_cons, bd_prod);

                    /* Set Toeplitz hash for an LRO skb */
                    bnx2x_set_skb_rxhash(bp, cqe, skb);

                    goto next_rx;
                }

                if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
                    DP(NETIF_MSG_RX_STATUS,
                       "calling tpa_stop on queue %d\n",
                       queue);

                    if (!BNX2X_RX_SUM_FIX(cqe))
                        BNX2X_ERR("STOP on non-TCP "
                                  "data\n");

                    /* This is a size of the linear data
                       on this skb */
                    len = le16_to_cpu(cqe->fast_path_cqe.
                                      len_on_bd);
                    bnx2x_tpa_stop(bp, fp, queue, pad,
                                   len, cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
                    if (bp->panic)
                        return 0;
#endif

                    bnx2x_update_sge_prod(fp,
                                          &cqe->fast_path_cqe);
                    goto next_cqe;
                }
            }

            dma_sync_single_for_device(&bp->pdev->dev,
                                       dma_unmap_addr(rx_buf, mapping),
                                       pad + RX_COPY_THRESH,
                                       DMA_FROM_DEVICE);
            prefetch(((char *)(skb)) + L1_CACHE_BYTES);
            /* is this an error packet? */
            if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
                DP(NETIF_MSG_RX_ERR,
                   "ERROR flags %x rx packet %u\n",
                   cqe_fp_flags, sw_comp_cons);
                fp->eth_q_stats.rx_err_discard_pkt++;
                goto reuse_rx;
            }

            /* Since we don't have a jumbo ring
             * copy small packets if mtu > 1500
             */
            if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
                (len <= RX_COPY_THRESH)) {
                struct sk_buff *new_skb;

                new_skb = netdev_alloc_skb(bp->dev,
                                           len + pad);
                if (new_skb == NULL) {
                    DP(NETIF_MSG_RX_ERR,
                       "ERROR packet dropped "
                       "because of alloc failure\n");
                    fp->eth_q_stats.rx_skb_alloc_failed++;
                    goto reuse_rx;
                }

                /* aligned copy */
                skb_copy_from_linear_data_offset(skb, pad,
                    new_skb->data + pad, len);
                skb_reserve(new_skb, pad);
                skb_put(new_skb, len);

                bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod);

                skb = new_skb;

            } else
            if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
                dma_unmap_single(&bp->pdev->dev,
                                 dma_unmap_addr(rx_buf, mapping),
                                 bp->rx_buf_size,
                                 DMA_FROM_DEVICE);
                skb_reserve(skb, pad);
                skb_put(skb, len);

            } else {
                DP(NETIF_MSG_RX_ERR,
                   "ERROR packet dropped because "
                   "of alloc failure\n");
                fp->eth_q_stats.rx_skb_alloc_failed++;
reuse_rx:
                bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod);
                goto next_rx;
            }

            skb->protocol = eth_type_trans(skb, bp->dev);

            /* Set Toeplitz hash for a non-LRO skb */
            bnx2x_set_skb_rxhash(bp, cqe, skb);

            skb_checksum_none_assert(skb);

            if (bp->rx_csum) {
                if (likely(BNX2X_RX_CSUM_OK(cqe)))
                    skb->ip_summed = CHECKSUM_UNNECESSARY;
                else
                    fp->eth_q_stats.hw_csum_err++;
            }

            skb_record_rx_queue(skb, fp->index);

            if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
                (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
                 PARSING_FLAGS_VLAN))
                vlan_gro_receive(&fp->napi, bp->vlgrp,
                    le16_to_cpu(cqe->fast_path_cqe.vlan_tag), skb);
            else
                napi_gro_receive(&fp->napi, skb);
next_rx:
            rx_buf->skb = NULL;

            bd_cons = NEXT_RX_IDX(bd_cons);
            bd_prod = NEXT_RX_IDX(bd_prod);
            bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
            rx_pkt++;
        } /* else */

next_cqe:
        sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
        sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

        if (rx_pkt == budget)
            break;
    } /* while */

    fp->rx_bd_cons = bd_cons;
    fp->rx_bd_prod = bd_prod_fw;
    fp->rx_comp_cons = sw_comp_cons;
    fp->rx_comp_prod = sw_comp_prod;

    /* Update producers */
    bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
                         fp->rx_sge_prod);

    fp->rx_pkt += rx_pkt;
    fp->rx_calls++;

    return rx_pkt;
}
static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
    struct bnx2x_fastpath *fp = fp_cookie;
    struct bnx2x *bp = fp->bp;

    /* Return here if interrupt is disabled */
    if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
        DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
        return IRQ_HANDLED;
    }

    DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB "
       "[fp %d fw_sd %d igusb %d]\n",
       fp->index, fp->fw_sb_id, fp->igu_sb_id);
    bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
    if (unlikely(bp->panic))
        return IRQ_HANDLED;
#endif

    /* Handle Rx and Tx according to MSI-X vector */
    prefetch(fp->rx_cons_sb);
    prefetch(fp->tx_cons_sb);
    prefetch(&fp->sb_running_index[SM_RX_ID]);
    napi_schedule(&bnx2x_fp(bp, fp->index, napi));

    return IRQ_HANDLED;
}
/* HW Lock for shared dual port PHYs */
void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
    mutex_lock(&bp->port.phy_mutex);

    if (bp->port.need_hw_lock)
        bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
}

void bnx2x_release_phy_lock(struct bnx2x *bp)
{
    if (bp->port.need_hw_lock)
        bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);

    mutex_unlock(&bp->port.phy_mutex);
}
void bnx2x_link_report(struct bnx2x *bp)
{
    if (bp->flags & MF_FUNC_DIS) {
        netif_carrier_off(bp->dev);
        netdev_err(bp->dev, "NIC Link is Down\n");
        return;
    }

    if (bp->link_vars.link_up) {
        u16 line_speed;

        if (bp->state == BNX2X_STATE_OPEN)
            netif_carrier_on(bp->dev);
        netdev_info(bp->dev, "NIC Link is Up, ");

        line_speed = bp->link_vars.line_speed;
        if (IS_MF(bp)) {
            u16 vn_max_rate =
                ((bp->mf_config[BP_VN(bp)] &
                  FUNC_MF_CFG_MAX_BW_MASK) >>
                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
            if (vn_max_rate < line_speed)
                line_speed = vn_max_rate;
        }

        pr_cont("%d Mbps ", line_speed);

        if (bp->link_vars.duplex == DUPLEX_FULL)
            pr_cont("full duplex");
        else
            pr_cont("half duplex");

        if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
            if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
                pr_cont(", receive ");
                if (bp->link_vars.flow_ctrl &
                    BNX2X_FLOW_CTRL_TX)
                    pr_cont("& transmit ");
            } else
                pr_cont(", transmit ");

            pr_cont("flow control ON");
        }
        pr_cont("\n");

    } else { /* link_down */
        netif_carrier_off(bp->dev);
        netdev_err(bp->dev, "NIC Link is Down\n");
    }
}
/* Returns the number of actually allocated BDs */
static inline int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
                                     int rx_ring_size)
{
    struct bnx2x *bp = fp->bp;
    u16 ring_prod, cqe_ring_prod;
    int i;

    fp->rx_comp_cons = 0;
    cqe_ring_prod = ring_prod = 0;
    for (i = 0; i < rx_ring_size; i++) {
        if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
            BNX2X_ERR("was only able to allocate "
                      "%d rx skbs on queue[%d]\n", i, fp->index);
            fp->eth_q_stats.rx_skb_alloc_failed++;
            break;
        }
        ring_prod = NEXT_RX_IDX(ring_prod);
        cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
        WARN_ON(ring_prod <= i);
    }

    fp->rx_bd_prod = ring_prod;
    /* Limit the CQE producer by the CQE ring size */
    fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
                             cqe_ring_prod);
    fp->rx_pkt = fp->rx_calls = 0;

    return i;
}
static inline void bnx2x_alloc_rx_bd_ring(struct bnx2x_fastpath *fp)
{
    struct bnx2x *bp = fp->bp;
    int rx_ring_size = bp->rx_ring_size ? bp->rx_ring_size :
                       MAX_RX_AVAIL/bp->num_queues;

    rx_ring_size = max_t(int, MIN_RX_AVAIL, rx_ring_size);

    bnx2x_alloc_rx_bds(fp, rx_ring_size);

    /* Warning!
     * this will generate an interrupt (to the TSTORM)
     * must only be done after chip is initialized
     */
    bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
                         fp->rx_sge_prod);
}
void bnx2x_init_rx_rings(struct bnx2x *bp)
{
    int func = BP_FUNC(bp);
    int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
                                          ETH_MAX_AGGREGATION_QUEUES_E1H;
    u16 ring_prod;
    int i, j;

    bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN +
        BNX2X_FW_IP_HDR_ALIGN_PAD;

    DP(NETIF_MSG_IFUP,
       "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);

    for_each_queue(bp, j) {
        struct bnx2x_fastpath *fp = &bp->fp[j];

        if (!fp->disable_tpa) {
            for (i = 0; i < max_agg_queues; i++) {
                fp->tpa_pool[i].skb =
                    netdev_alloc_skb(bp->dev, bp->rx_buf_size);
                if (!fp->tpa_pool[i].skb) {
                    BNX2X_ERR("Failed to allocate TPA "
                              "skb pool for queue[%d] - "
                              "disabling TPA on this "
                              "queue!\n", j);
                    bnx2x_free_tpa_pool(bp, fp, i);
                    fp->disable_tpa = 1;
                    break;
                }
                dma_unmap_addr_set((struct sw_rx_bd *)
                                   &bp->fp->tpa_pool[i],
                                   mapping, 0);
                fp->tpa_state[i] = BNX2X_TPA_STOP;
            }

            /* "next page" elements initialization */
            bnx2x_set_next_page_sgl(fp);

            /* set SGEs bit mask */
            bnx2x_init_sge_ring_bit_mask(fp);

            /* Allocate SGEs and initialize the ring elements */
            for (i = 0, ring_prod = 0;
                 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {

                if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
                    BNX2X_ERR("was only able to allocate "
                              "%d rx sges\n", i);
                    BNX2X_ERR("disabling TPA for"
                              " queue[%d]\n", j);
                    /* Cleanup already allocated elements */
                    bnx2x_free_rx_sge_range(bp,
                                            fp, ring_prod);
                    bnx2x_free_tpa_pool(bp,
                                        fp,
                                        max_agg_queues);
                    fp->disable_tpa = 1;
                    ring_prod = 0;
                    break;
                }
                ring_prod = NEXT_SGE_IDX(ring_prod);
            }

            fp->rx_sge_prod = ring_prod;
        }
    }

    for_each_queue(bp, j) {
        struct bnx2x_fastpath *fp = &bp->fp[j];

        fp->rx_bd_cons = 0;

        bnx2x_set_next_page_rx_bd(fp);

        /* CQ ring */
        bnx2x_set_next_page_rx_cq(fp);

        /* Allocate BDs and initialize BD ring */
        bnx2x_alloc_rx_bd_ring(fp);

        if (j != 0)
            continue;

        if (!CHIP_IS_E2(bp)) {
            REG_WR(bp, BAR_USTRORM_INTMEM +
                   USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
                   U64_LO(fp->rx_comp_mapping));
            REG_WR(bp, BAR_USTRORM_INTMEM +
                   USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
                   U64_HI(fp->rx_comp_mapping));
        }
    }
}
static void bnx2x_free_tx_skbs(struct bnx2x *bp)
{
    int i;

    for_each_queue(bp, i) {
        struct bnx2x_fastpath *fp = &bp->fp[i];

        u16 bd_cons = fp->tx_bd_cons;
        u16 sw_prod = fp->tx_pkt_prod;
        u16 sw_cons = fp->tx_pkt_cons;

        while (sw_cons != sw_prod) {
            bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
            sw_cons++;
        }
    }
}

static void bnx2x_free_rx_skbs(struct bnx2x *bp)
{
    int i, j;

    for_each_queue(bp, j) {
        struct bnx2x_fastpath *fp = &bp->fp[j];

        for (i = 0; i < NUM_RX_BD; i++) {
            struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
            struct sk_buff *skb = rx_buf->skb;

            if (skb == NULL)
                continue;

            dma_unmap_single(&bp->pdev->dev,
                             dma_unmap_addr(rx_buf, mapping),
                             bp->rx_buf_size, DMA_FROM_DEVICE);

            rx_buf->skb = NULL;
            dev_kfree_skb(skb);
        }
        if (!fp->disable_tpa)
            bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
                                ETH_MAX_AGGREGATION_QUEUES_E1 :
                                ETH_MAX_AGGREGATION_QUEUES_E1H);
    }
}

void bnx2x_free_skbs(struct bnx2x *bp)
{
    bnx2x_free_tx_skbs(bp);
    bnx2x_free_rx_skbs(bp);
}
static void bnx2x_free_msix_irqs(struct bnx2x *bp)
{
    int i, offset = 1;

    free_irq(bp->msix_table[0].vector, bp->dev);
    DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
       bp->msix_table[0].vector);

    for_each_queue(bp, i) {
        DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
           "state %x\n", i, bp->msix_table[i + offset].vector,
           bnx2x_fp(bp, i, state));

        free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
    }
}

void bnx2x_free_irq(struct bnx2x *bp)
{
    if (bp->flags & USING_MSIX_FLAG)
        bnx2x_free_msix_irqs(bp);
    else if (bp->flags & USING_MSI_FLAG)
        free_irq(bp->pdev->irq, bp->dev);
    else
        free_irq(bp->pdev->irq, bp->dev);
}
int bnx2x_enable_msix(struct bnx2x *bp)
{
    int msix_vec = 0, i, rc, req_cnt;

    bp->msix_table[msix_vec].entry = msix_vec;
    DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n",
       bp->msix_table[0].entry);
    msix_vec++;

#ifdef BCM_CNIC
    bp->msix_table[msix_vec].entry = msix_vec;
    DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d (CNIC)\n",
       bp->msix_table[msix_vec].entry, bp->msix_table[msix_vec].entry);
    msix_vec++;
#endif

    for_each_queue(bp, i) {
        bp->msix_table[msix_vec].entry = msix_vec;
        DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
           "(fastpath #%u)\n", msix_vec, msix_vec, i);
        msix_vec++;
    }

    req_cnt = BNX2X_NUM_QUEUES(bp) + CNIC_CONTEXT_USE + 1;

    rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], req_cnt);

    /*
     * reconfigure number of tx/rx queues according to available
     * MSI-X vectors
     */
    if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
        /* how many fewer vectors will we have? */
        int diff = req_cnt - rc;

        DP(NETIF_MSG_IFUP,
           "Trying to use less MSI-X vectors: %d\n", rc);

        rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);

        if (rc) {
            DP(NETIF_MSG_IFUP,
               "MSI-X is not attainable rc %d\n", rc);
            return rc;
        }
        /*
         * decrease number of queues by number of unallocated entries
         */
        bp->num_queues -= diff;

        DP(NETIF_MSG_IFUP, "New queue configuration set: %d\n",
           bp->num_queues);
    } else if (rc) {
        /* fall to INTx if not enough memory */
        if (rc == -ENOMEM)
            bp->flags |= DISABLE_MSI_FLAG;
        DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
        return rc;
    }

    bp->flags |= USING_MSIX_FLAG;

    return 0;
}
static int bnx2x_req_msix_irqs(struct bnx2x *bp)
{
    int i, rc, offset = 1;

    rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
                     bp->dev->name, bp->dev);
    if (rc) {
        BNX2X_ERR("request sp irq failed\n");
        return -EBUSY;
    }

#ifdef BCM_CNIC
    offset++;
#endif
    for_each_queue(bp, i) {
        struct bnx2x_fastpath *fp = &bp->fp[i];
        snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
                 bp->dev->name, i);

        rc = request_irq(bp->msix_table[offset].vector,
                         bnx2x_msix_fp_int, 0, fp->name, fp);
        if (rc) {
            BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
            bnx2x_free_msix_irqs(bp);
            return -EBUSY;
        }

        offset++;
        fp->state = BNX2X_FP_STATE_IRQ;
    }

    i = BNX2X_NUM_QUEUES(bp);
    offset = 1 + CNIC_CONTEXT_USE;
    netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d"
                " ... fp[%d] %d\n",
                bp->msix_table[0].vector,
                0, bp->msix_table[offset].vector,
                i - 1, bp->msix_table[offset + i - 1].vector);

    return 0;
}

int bnx2x_enable_msi(struct bnx2x *bp)
{
    int rc;

    rc = pci_enable_msi(bp->pdev);
    if (rc) {
        DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
        return -1;
    }
    bp->flags |= USING_MSI_FLAG;

    return 0;
}
static int bnx2x_req_irq(struct bnx2x *bp)
{
    unsigned long flags;
    int rc;

    if (bp->flags & USING_MSI_FLAG)
        flags = 0;
    else
        flags = IRQF_SHARED;

    rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
                     bp->dev->name, bp->dev);
    if (!rc)
        bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;

    return rc;
}

static void bnx2x_napi_enable(struct bnx2x *bp)
{
    int i;

    for_each_queue(bp, i)
        napi_enable(&bnx2x_fp(bp, i, napi));
}

static void bnx2x_napi_disable(struct bnx2x *bp)
{
    int i;

    for_each_queue(bp, i)
        napi_disable(&bnx2x_fp(bp, i, napi));
}
void bnx2x_netif_start(struct bnx2x *bp)
{
    int intr_sem;

    intr_sem = atomic_dec_and_test(&bp->intr_sem);
    smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

    if (intr_sem) {
        if (netif_running(bp->dev)) {
            bnx2x_napi_enable(bp);
            bnx2x_int_enable(bp);
            if (bp->state == BNX2X_STATE_OPEN)
                netif_tx_wake_all_queues(bp->dev);
        }
    }
}

void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
{
    bnx2x_int_disable_sync(bp, disable_hw);
    bnx2x_napi_disable(bp);
    netif_tx_disable(bp->dev);
}

void bnx2x_set_num_queues(struct bnx2x *bp)
{
    switch (bp->multi_mode) {
    case ETH_RSS_MODE_DISABLED:
        bp->num_queues = 1;
        break;
    case ETH_RSS_MODE_REGULAR:
        bp->num_queues = bnx2x_calc_num_queues(bp);
        break;

    default:
        bp->num_queues = 1;
        break;
    }
}
static void bnx2x_release_firmware(struct bnx2x *bp)
{
    kfree(bp->init_ops_offsets);
    kfree(bp->init_ops);
    kfree(bp->init_data);
    release_firmware(bp->firmware);
}
/* must be called with rtnl_lock */
int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
{
    u32 load_code;
    int i, rc;

    /* Set init arrays */
    rc = bnx2x_init_firmware(bp);
    if (rc) {
        BNX2X_ERR("Error loading firmware\n");
        return rc;
    }

#ifdef BNX2X_STOP_ON_ERROR
    if (unlikely(bp->panic))
        return -EPERM;
#endif

    bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;

    /* must be called before memory allocation and HW init */
    bnx2x_ilt_set_info(bp);

    if (bnx2x_alloc_mem(bp))
        return -ENOMEM;

    netif_set_real_num_tx_queues(bp->dev, bp->num_queues);
    rc = netif_set_real_num_rx_queues(bp->dev, bp->num_queues);
    if (rc) {
        BNX2X_ERR("Unable to update real_num_rx_queues\n");
        goto load_error0;
    }

    for_each_queue(bp, i)
        bnx2x_fp(bp, i, disable_tpa) =
            ((bp->flags & TPA_ENABLE_FLAG) == 0);

    bnx2x_napi_enable(bp);

    /* Send LOAD_REQUEST command to MCP
       Returns the type of LOAD command:
       if it is the first port to be initialized
       common blocks should be initialized, otherwise - not
    */
    if (!BP_NOMCP(bp)) {
        load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0);
        if (!load_code) {
            BNX2X_ERR("MCP response failure, aborting\n");
            rc = -EBUSY;
            goto load_error1;
        }
        if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
            rc = -EBUSY; /* other port in diagnostic mode */
            goto load_error1;
        }

    } else {
        int path = BP_PATH(bp);
        int port = BP_PORT(bp);

        DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
           path, load_count[path][0], load_count[path][1],
           load_count[path][2]);
        load_count[path][0]++;
        load_count[path][1 + port]++;
        DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
           path, load_count[path][0], load_count[path][1],
           load_count[path][2]);
        if (load_count[path][0] == 1)
            load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
        else if (load_count[path][1 + port] == 1)
            load_code = FW_MSG_CODE_DRV_LOAD_PORT;
        else
            load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
    }

    if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
        (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
        (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
        bp->port.pmf = 1;
    else
        bp->port.pmf = 0;
    DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
    /* Initialize HW */
    rc = bnx2x_init_hw(bp, load_code);
    if (rc) {
        BNX2X_ERR("HW init failed, aborting\n");
        bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
        bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
        bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
        goto load_error2;
    }

    /* Connect to IRQs */
    rc = bnx2x_setup_irqs(bp);
    if (rc) {
        bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
        goto load_error2;
    }

    /* Setup NIC internals and enable interrupts */
    bnx2x_nic_init(bp, load_code);

    if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
         (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
        (bp->common.shmem2_base))
        SHMEM2_WR(bp, dcc_support,
                  (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
                   SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));

    /* Send LOAD_DONE command to MCP */
    if (!BP_NOMCP(bp)) {
        load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
        if (!load_code) {
            BNX2X_ERR("MCP response failure, aborting\n");
            rc = -EBUSY;
            goto load_error3;
        }
    }

    bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;

    rc = bnx2x_func_start(bp);
    if (rc) {
        BNX2X_ERR("Function start failed!\n");
#ifndef BNX2X_STOP_ON_ERROR
        goto load_error3;
#else
        bp->panic = 1;
        return -EBUSY;
#endif
    }

    rc = bnx2x_setup_client(bp, &bp->fp[0], 1 /* Leading */);
    if (rc) {
        BNX2X_ERR("Setup leading failed!\n");
#ifndef BNX2X_STOP_ON_ERROR
        goto load_error3;
#else
        bp->panic = 1;
        return -EBUSY;
#endif
    }

    if (!CHIP_IS_E1(bp) &&
        (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED)) {
        DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
        bp->flags |= MF_FUNC_DIS;
    }

#ifdef BCM_CNIC
    /* Enable Timer scan */
    REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
#endif

    for_each_nondefault_queue(bp, i) {
        rc = bnx2x_setup_client(bp, &bp->fp[i], 0);
        if (rc)
#ifdef BCM_CNIC
            goto load_error4;
#else
            goto load_error3;
#endif
    }
    /* Now when Clients are configured we are ready to work */
    bp->state = BNX2X_STATE_OPEN;

    bnx2x_set_eth_mac(bp, 1);

#ifdef BCM_CNIC
    /* Set iSCSI L2 MAC */
    mutex_lock(&bp->cnic_mutex);
    if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD) {
        bnx2x_set_iscsi_eth_mac_addr(bp, 1);
        bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
        bnx2x_init_sb(bp, bp->cnic_sb_mapping,
                      BNX2X_VF_ID_INVALID, false,
                      CNIC_SB_ID(bp), CNIC_IGU_SB_ID(bp));
    }
    mutex_unlock(&bp->cnic_mutex);
#endif

    if (bp->port.pmf)
        bnx2x_initial_phy_init(bp, load_mode);

    /* Start fast path */
    switch (load_mode) {
    case LOAD_NORMAL:
        /* Tx queue should be only re-enabled */
        netif_tx_wake_all_queues(bp->dev);
        /* Initialize the receive filter. */
        bnx2x_set_rx_mode(bp->dev);
        break;

    case LOAD_OPEN:
        netif_tx_start_all_queues(bp->dev);
        smp_mb__after_clear_bit();
        /* Initialize the receive filter. */
        bnx2x_set_rx_mode(bp->dev);
        break;

    case LOAD_DIAG:
        /* Initialize the receive filter. */
        bnx2x_set_rx_mode(bp->dev);
        bp->state = BNX2X_STATE_DIAG;
        break;

    default:
        break;
    }

    if (!bp->port.pmf)
        bnx2x__link_status_update(bp);

    /* start the timer */
    mod_timer(&bp->timer, jiffies + bp->current_interval);

#ifdef BCM_CNIC
    bnx2x_setup_cnic_irq_info(bp);
    if (bp->state == BNX2X_STATE_OPEN)
        bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
#endif
    bnx2x_inc_load_cnt(bp);

    bnx2x_release_firmware(bp);

    return 0;
#ifdef BCM_CNIC
load_error4:
    /* Disable Timer scan */
    REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0);
#endif
load_error3:
    bnx2x_int_disable_sync(bp, 1);

    /* Free SKBs, SGEs, TPA pool and driver internals */
    bnx2x_free_skbs(bp);
    for_each_queue(bp, i)
        bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);

    /* Release IRQs */
    bnx2x_free_irq(bp);
load_error2:
    if (!BP_NOMCP(bp)) {
        bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
        bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
    }

    bp->port.pmf = 0;
load_error1:
    bnx2x_napi_disable(bp);
load_error0:
    bnx2x_free_mem(bp);

    bnx2x_release_firmware(bp);

    return rc;
}
/* must be called with rtnl_lock */
int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
{
    int i;

    if (bp->state == BNX2X_STATE_CLOSED) {
        /* Interface has been removed - nothing to recover */
        bp->recovery_state = BNX2X_RECOVERY_DONE;
        bp->is_leader = 0;
        bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
        smp_wmb();

        return -EINVAL;
    }

#ifdef BCM_CNIC
    bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
#endif
    bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;

    /* Set "drop all" */
    bp->rx_mode = BNX2X_RX_MODE_NONE;
    bnx2x_set_storm_rx_mode(bp);

    /* Stop Tx */
    bnx2x_tx_disable(bp);

    del_timer_sync(&bp->timer);

    SHMEM_WR(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb,
             (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));

    bnx2x_stats_handle(bp, STATS_EVENT_STOP);

    /* Cleanup the chip if needed */
    if (unload_mode != UNLOAD_RECOVERY)
        bnx2x_chip_cleanup(bp, unload_mode);
    else {
        /* Disable HW interrupts, NAPI and Tx */
        bnx2x_netif_stop(bp, 1);

        /* Release IRQs */
        bnx2x_free_irq(bp);
    }

    /* Free SKBs, SGEs, TPA pool and driver internals */
    bnx2x_free_skbs(bp);
    for_each_queue(bp, i)
        bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);

    bnx2x_free_mem(bp);

    bp->state = BNX2X_STATE_CLOSED;

    /* The last driver must disable a "close the gate" if there is no
     * parity attention or "process kill" pending.
     */
    if ((!bnx2x_dec_load_cnt(bp)) && (!bnx2x_chk_parity_attn(bp)) &&
        bnx2x_reset_is_done(bp))
        bnx2x_disable_close_the_gate(bp);

    /* Reset MCP mailbox sequence if there is ongoing recovery */
    if (unload_mode == UNLOAD_RECOVERY)
        bp->fw_seq = 0;

    return 0;
}
int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
{
    u16 pmcsr;

    pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

    switch (state) {
    case PCI_D0:
        pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
                              ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
                               PCI_PM_CTRL_PME_STATUS));

        if (pmcsr & PCI_PM_CTRL_STATE_MASK)
            /* delay required during transition out of D3hot */
            msleep(20);
        break;

    case PCI_D3hot:
        /* If there are other clients above, don't
           shut down the power */
        if (atomic_read(&bp->pdev->enable_cnt) != 1)
            return 0;
        /* Don't shut down the power for emulation and FPGA */
        if (CHIP_REV_IS_SLOW(bp))
            return 0;

        pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
        pmcsr |= 3;

        if (bp->wol)
            pmcsr |= PCI_PM_CTRL_PME_ENABLE;

        pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
                              pmcsr);

        /* No more memory access after this point until
         * device is brought back to D0.
         */
        break;

    default:
        return -EINVAL;
    }
    return 0;
}
/*
 * net_device service functions
 */
int bnx2x_poll(struct napi_struct *napi, int budget)
{
    int work_done = 0;
    struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
                                             napi);
    struct bnx2x *bp = fp->bp;

    while (1) {
#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic)) {
            napi_complete(napi);
            return 0;
        }
#endif

        if (bnx2x_has_tx_work(fp))
            bnx2x_tx_int(fp);

        if (bnx2x_has_rx_work(fp)) {
            work_done += bnx2x_rx_int(fp, budget - work_done);

            /* must not complete if we consumed full budget */
            if (work_done >= budget)
                break;
        }

        /* Fall out from the NAPI loop if needed */
        if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
            bnx2x_update_fpsb_idx(fp);
            /* bnx2x_has_rx_work() reads the status block,
             * thus we need to ensure that status block indices
             * have been actually read (bnx2x_update_fpsb_idx)
             * prior to this check (bnx2x_has_rx_work) so that
             * we won't write the "newer" value of the status block
             * to IGU (if there was a DMA right after
             * bnx2x_has_rx_work and if there is no rmb, the memory
             * reading (bnx2x_update_fpsb_idx) may be postponed
             * to right before bnx2x_ack_sb). In this case there
             * will never be another interrupt until there is
             * another update of the status block, while there
             * is still unhandled work.
             */
            rmb();

            if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
                napi_complete(napi);
                /* Re-enable interrupts */
                DP(NETIF_MSG_HW,
                   "Update index to %d\n", fp->fp_hc_idx);
                bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
                             le16_to_cpu(fp->fp_hc_idx),
                             IGU_INT_ENABLE, 1);
                break;
            }
        }
    }

    return work_done;
}
/* we split the first BD into headers and data BDs
 * to ease the pain of our fellow microcode engineers
 * we use one mapping for both BDs
 * So far this has only been observed to happen
 * in Other Operating Systems(TM)
 */
static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
                                   struct bnx2x_fastpath *fp,
                                   struct sw_tx_bd *tx_buf,
                                   struct eth_tx_start_bd **tx_bd, u16 hlen,
                                   u16 bd_prod, int nbd)
{
    struct eth_tx_start_bd *h_tx_bd = *tx_bd;
    struct eth_tx_bd *d_tx_bd;
    dma_addr_t mapping;
    int old_len = le16_to_cpu(h_tx_bd->nbytes);

    /* first fix first BD */
    h_tx_bd->nbd = cpu_to_le16(nbd);
    h_tx_bd->nbytes = cpu_to_le16(hlen);

    DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
       "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
       h_tx_bd->addr_lo, h_tx_bd->nbd);

    /* now get a new data BD
     * (after the pbd) and fill it */
    bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
    d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;

    mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
                       le32_to_cpu(h_tx_bd->addr_lo)) + hlen;

    d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
    d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
    d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);

    /* this marks the BD as one that has no individual mapping */
    tx_buf->flags |= BNX2X_TSO_SPLIT_BD;

    DP(NETIF_MSG_TX_QUEUED,
       "TSO split data size is %d (%x:%x)\n",
       d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);

    /* update tx_bd */
    *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;

    return bd_prod;
}
static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
{
    if (fix > 0)
        csum = (u16) ~csum_fold(csum_sub(csum,
                                csum_partial(t_header - fix, fix, 0)));

    else if (fix < 0)
        csum = (u16) ~csum_fold(csum_add(csum,
                                csum_partial(t_header, -fix, 0)));

    return swab16(csum);
}
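/* How bnx2x_csum_fix() is used (explanatory sketch, not from the original
 * sources): the HW starts checksumming at a fixed offset, so when it began
 * `fix` bytes before the transport header (fix > 0) the partial sum of
 * those extra bytes is subtracted back out, and when it began after it
 * (fix < 0) the skipped bytes are added in. The result is folded to 16
 * bits and byte-swapped into the order the parsing BD expects.
 */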
static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
{
    u32 rc;

    if (skb->ip_summed != CHECKSUM_PARTIAL)
        rc = XMIT_PLAIN;

    else {
        if (skb->protocol == htons(ETH_P_IPV6)) {
            rc = XMIT_CSUM_V6;
            if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
                rc |= XMIT_CSUM_TCP;

        } else {
            rc = XMIT_CSUM_V4;
            if (ip_hdr(skb)->protocol == IPPROTO_TCP)
                rc |= XMIT_CSUM_TCP;
        }
    }

    if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
        rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP);

    else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
        rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6);

    return rc;
}
#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
/* check if packet requires linearization (packet is too fragmented)
   no need to check fragmentation if page size > 8K (there will be no
   violation of FW restrictions) */
static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
                             u32 xmit_type)
{
    int to_copy = 0;
    int hlen = 0;
    int first_bd_sz = 0;

    /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
    if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {

        if (xmit_type & XMIT_GSO) {
            unsigned short lso_mss = skb_shinfo(skb)->gso_size;
            /* Check if LSO packet needs to be copied:
               3 = 1 (for headers BD) + 2 (for PBD and last BD) */
            int wnd_size = MAX_FETCH_BD - 3;
            /* Number of windows to check */
            int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
            int wnd_idx = 0;
            int frag_idx = 0;
            u32 wnd_sum = 0;

            /* Headers length */
            hlen = (int)(skb_transport_header(skb) - skb->data) +
                   tcp_hdrlen(skb);

            /* Amount of data (w/o headers) on linear part of SKB */
            first_bd_sz = skb_headlen(skb) - hlen;

            wnd_sum = first_bd_sz;

            /* Calculate the first sum - it's special */
            for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
                wnd_sum +=
                    skb_shinfo(skb)->frags[frag_idx].size;

            /* If there was data on linear skb data - check it */
            if (first_bd_sz > 0) {
                if (unlikely(wnd_sum < lso_mss)) {
                    to_copy = 1;
                    goto exit_lbl;
                }

                wnd_sum -= first_bd_sz;
            }

            /* Others are easier: run through the frag list and
               check all windows */
            for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
                wnd_sum +=
                    skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;

                if (unlikely(wnd_sum < lso_mss)) {
                    to_copy = 1;
                    break;
                }
                wnd_sum -=
                    skb_shinfo(skb)->frags[wnd_idx].size;
            }
        } else {
            /* in non-LSO too fragmented packet should always
               be linearized */
            to_copy = 1;
        }
    }

exit_lbl:
    if (unlikely(to_copy))
        DP(NETIF_MSG_TX_QUEUED,
           "Linearization IS REQUIRED for %s packet. "
           "num_frags %d hlen %d first_bd_sz %d\n",
           (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
           skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);

    return to_copy;
}
#endif
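/* Worked example of the sliding-window check above (illustrative numbers,
 * not taken from the original sources): assuming MAX_FETCH_BD = 13, the FW
 * fetches at most wnd_size = 10 data BDs per pass, and every window of 10
 * consecutive frags must cover at least one MSS. With lso_mss = 1460 and
 * ten consecutive frags of 100 bytes each, wnd_sum = 1000 < 1460, so
 * to_copy = 1 and the skb is linearized before transmission.
 */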
static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb,
                                        struct eth_tx_parse_bd_e2 *pbd,
                                        u32 xmit_type)
{
    pbd->parsing_data |= cpu_to_le16(skb_shinfo(skb)->gso_size) <<
                         ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT;
    if ((xmit_type & XMIT_GSO_V6) &&
        (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
        pbd->parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
}
/**
 * Update PBD in GSO case.
 *
 * @param tx_start_bd
 */
static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
                                     struct eth_tx_parse_bd_e1x *pbd,
                                     u32 xmit_type)
{
    pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
    pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
    pbd->tcp_flags = pbd_tcp_flags(skb);

    if (xmit_type & XMIT_GSO_V4) {
        pbd->ip_id = swab16(ip_hdr(skb)->id);
        pbd->tcp_pseudo_csum =
            swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
                                      ip_hdr(skb)->daddr,
                                      0, IPPROTO_TCP, 0));

    } else
        pbd->tcp_pseudo_csum =
            swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
                                    &ipv6_hdr(skb)->daddr,
                                    0, IPPROTO_TCP, 0));

    pbd->global_data |= ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN;
}
/**
 * @param tx_start_bd
 *
 * @return header len
 */
static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
                                       struct eth_tx_parse_bd_e2 *pbd,
                                       u32 xmit_type)
{
    pbd->parsing_data |= cpu_to_le16(tcp_hdrlen(skb)/4) <<
                         ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT;

    pbd->parsing_data |= cpu_to_le16(((unsigned char *)tcp_hdr(skb) -
                                      skb->data) / 2) <<
                         ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT;

    return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
}
/**
 * @param tx_start_bd
 *
 * @return Header length
 */
static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
                                    struct eth_tx_parse_bd_e1x *pbd,
                                    u32 xmit_type)
{
    u8 hlen = (skb_network_header(skb) - skb->data) / 2;

    /* for now NS flag is not used in Linux */
    pbd->global_data =
        (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
                 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));

    pbd->ip_hlen_w = (skb_transport_header(skb) -
                      skb_network_header(skb)) / 2;

    hlen += pbd->ip_hlen_w + tcp_hdrlen(skb) / 2;

    pbd->total_hlen_w = cpu_to_le16(hlen);
    hlen = hlen*2;

    if (xmit_type & XMIT_CSUM_TCP) {
        pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);

    } else {
        s8 fix = SKB_CS_OFF(skb); /* signed! */

        DP(NETIF_MSG_TX_QUEUED,
           "hlen %d fix %d csum before fix %x\n",
           le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));

        /* HW bug: fixup the CSUM */
        pbd->tcp_pseudo_csum =
            bnx2x_csum_fix(skb_transport_header(skb),
                           SKB_CS(skb), fix);

        DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
           pbd->tcp_pseudo_csum);
    }

    return hlen;
}
/* called with netif_tx_lock
 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue()
 */
netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
    struct bnx2x *bp = netdev_priv(dev);
    struct bnx2x_fastpath *fp;
    struct netdev_queue *txq;
    struct sw_tx_bd *tx_buf;
    struct eth_tx_start_bd *tx_start_bd;
    struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
    struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
    struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
    u16 pkt_prod, bd_prod;
    int nbd, fp_index;
    dma_addr_t mapping;
    u32 xmit_type = bnx2x_xmit_type(bp, skb);
    int i;
    u8 hlen = 0;
    __le16 pkt_size = 0;
    struct ethhdr *eth;
    u8 mac_type = UNICAST_ADDRESS;

#ifdef BNX2X_STOP_ON_ERROR
    if (unlikely(bp->panic))
        return NETDEV_TX_BUSY;
#endif

    fp_index = skb_get_queue_mapping(skb);
    txq = netdev_get_tx_queue(dev, fp_index);

    fp = &bp->fp[fp_index];

    if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
        fp->eth_q_stats.driver_xoff++;
        netif_tx_stop_queue(txq);
        BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
        return NETDEV_TX_BUSY;
    }

    DP(NETIF_MSG_TX_QUEUED, "queue[%d]: SKB: summed %x protocol %x "
       "protocol(%x,%x) gso type %x xmit_type %x\n",
       fp_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
       ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);

    eth = (struct ethhdr *)skb->data;

    /* set flag according to packet type (UNICAST_ADDRESS is default) */
    if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
        if (is_broadcast_ether_addr(eth->h_dest))
            mac_type = BROADCAST_ADDRESS;
        else
            mac_type = MULTICAST_ADDRESS;
    }

#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
    /* First, check if we need to linearize the skb (due to FW
       restrictions). No need to check fragmentation if page size > 8K
       (there will be no violation of FW restrictions) */
    if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
        /* Statistics of linearization */
        bp->lin_cnt++;
        if (skb_linearize(skb) != 0) {
            DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
               "silently dropping this SKB\n");
            dev_kfree_skb_any(skb);
            return NETDEV_TX_OK;
        }
    }
#endif

    /*
       Please read carefully. First we use one BD which we mark as start,
       then we have a parsing info BD (used for TSO or xsum),
       and only then we have the rest of the TSO BDs.
       (don't forget to mark the last one as last,
       and to unmap only AFTER you write to the BD ...)
       And above all, all pbd sizes are in words - NOT DWORDS!
    */
    pkt_prod = fp->tx_pkt_prod++;
    bd_prod = TX_BD(fp->tx_bd_prod);

    /* get a tx_buf and first BD */
    tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
    tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;

    tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
    SET_FLAG(tx_start_bd->general_data,
             ETH_TX_START_BD_ETH_ADDR_TYPE,
             mac_type);

    /* header nbd */
    SET_FLAG(tx_start_bd->general_data,
             ETH_TX_START_BD_HDR_NBDS,
             1);

    /* remember the first BD of the packet */
    tx_buf->first_bd = fp->tx_bd_prod;
    tx_buf->skb = skb;
    tx_buf->flags = 0;

    DP(NETIF_MSG_TX_QUEUED,
       "sending pkt %u @%p next_idx %u bd %u @%p\n",
       pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);

    if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
        (bp->flags & HW_VLAN_TX_FLAG)) {
        tx_start_bd->vlan_or_ethertype =
            cpu_to_le16(vlan_tx_tag_get(skb));
        tx_start_bd->bd_flags.as_bitfield |=
            (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
    } else
        tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);

    /* turn on parsing and get a BD */
    bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));

    if (xmit_type & XMIT_CSUM) {
        tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;

        if (xmit_type & XMIT_CSUM_V4)
            tx_start_bd->bd_flags.as_bitfield |=
                ETH_TX_BD_FLAGS_IP_CSUM;
        else
            tx_start_bd->bd_flags.as_bitfield |=
                ETH_TX_BD_FLAGS_IPV6;

        if (!(xmit_type & XMIT_CSUM_TCP))
            tx_start_bd->bd_flags.as_bitfield |=
                ETH_TX_BD_FLAGS_IS_UDP;
    }

    if (CHIP_IS_E2(bp)) {
        pbd_e2 = &fp->tx_desc_ring[bd_prod].parse_bd_e2;
        memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
        /* Set PBD in checksum offload case */
        if (xmit_type & XMIT_CSUM)
            hlen = bnx2x_set_pbd_csum_e2(bp,
                                         skb, pbd_e2, xmit_type);
    } else {
        pbd_e1x = &fp->tx_desc_ring[bd_prod].parse_bd_e1x;
        memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
        /* Set PBD in checksum offload case */
        if (xmit_type & XMIT_CSUM)
            hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
    }

    mapping = dma_map_single(&bp->pdev->dev, skb->data,
                             skb_headlen(skb), DMA_TO_DEVICE);

    tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
    tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
    nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
    tx_start_bd->nbd = cpu_to_le16(nbd);
    tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
    pkt_size = tx_start_bd->nbytes;

    DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
       " nbytes %d flags %x vlan %x\n",
       tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
       le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
       tx_start_bd->bd_flags.as_bitfield,
       le16_to_cpu(tx_start_bd->vlan_or_ethertype));

    if (xmit_type & XMIT_GSO) {

        DP(NETIF_MSG_TX_QUEUED,
           "TSO packet len %d hlen %d total len %d tso size %d\n",
           skb->len, hlen, skb_headlen(skb),
           skb_shinfo(skb)->gso_size);

        tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;

        if (unlikely(skb_headlen(skb) > hlen))
            bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
                                     hlen, bd_prod, ++nbd);
        if (CHIP_IS_E2(bp))
            bnx2x_set_pbd_gso_e2(skb, pbd_e2, xmit_type);
        else
            bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
    }
    tx_data_bd = (struct eth_tx_bd *)tx_start_bd;

    for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

        bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
        tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
        if (total_pkt_bd == NULL)
            total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;

        mapping = dma_map_page(&bp->pdev->dev, frag->page,
                               frag->page_offset,
                               frag->size, DMA_TO_DEVICE);

        tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
        tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
        tx_data_bd->nbytes = cpu_to_le16(frag->size);
        le16_add_cpu(&pkt_size, frag->size);

        DP(NETIF_MSG_TX_QUEUED,
           "frag %d bd @%p addr (%x:%x) nbytes %d\n",
           i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
           le16_to_cpu(tx_data_bd->nbytes));
    }

    DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);

    bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));

    /* now send a tx doorbell, counting the next BD
     * if the packet contains or ends with it
     */
    if (TX_BD_POFF(bd_prod) < nbd)
        nbd++;

    if (total_pkt_bd != NULL)
        total_pkt_bd->total_pkt_bytes = pkt_size;
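    /* Example of the next-page accounting just above (illustrative, not
     * from the original sources): every TX ring page ends in a "next page"
     * pointer BD that carries no packet data. When a packet's BDs wrap
     * past the end of a page, TX_BD_POFF(bd_prod) becomes smaller than
     * nbd, and nbd is bumped so the doorbell also covers the next-page BD
     * the packet stepped over.
     */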
    if (pbd_e1x)
        DP(NETIF_MSG_TX_QUEUED,
           "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
           " tcp_flags %x xsum %x seq %u hlen %u\n",
           pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
           pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
           pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
           le16_to_cpu(pbd_e1x->total_hlen_w));
    if (pbd_e2)
        DP(NETIF_MSG_TX_QUEUED,
           "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
           pbd_e2, pbd_e2->dst_mac_addr_hi, pbd_e2->dst_mac_addr_mid,
           pbd_e2->dst_mac_addr_lo, pbd_e2->src_mac_addr_hi,
           pbd_e2->src_mac_addr_mid, pbd_e2->src_mac_addr_lo,
           pbd_e2->parsing_data);
    DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);

    /*
     * Make sure that the BD data is updated before updating the producer
     * since FW might read the BD right after the producer is updated.
     * This is only applicable for weak-ordered memory model archs such
     * as IA-64. The following barrier is also mandatory since FW
     * assumes packets must have BDs.
     */
    wmb();

    fp->tx_db.data.prod += nbd;
    barrier();
    DOORBELL(bp, fp->cid, fp->tx_db.raw);

    mmiowb();

    fp->tx_bd_prod += nbd;

    if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
        netif_tx_stop_queue(txq);

        /* paired memory barrier is in bnx2x_tx_int(), we have to keep
         * ordering of set_bit() in netif_tx_stop_queue() and read of
         * fp->tx_bd_cons */
        smp_mb();

        fp->eth_q_stats.driver_xoff++;
        if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
            netif_tx_wake_queue(txq);
    }
    fp->tx_pkt++;

    return NETDEV_TX_OK;
}
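/* BD-chain summary for the transmit path above (illustrative sketch, not
 * from the original sources), for a checksum-offloaded packet with two
 * frags:
 *
 *   start BD (flags, vlan, headlen) -> parse BD (E1x or E2 format)
 *     -> data BD (frag 0) -> data BD (frag 1)
 *
 * nbd = nr_frags + 2 counts the start and parse BDs; the producer advances
 * by nbd and is published to the chip with a single doorbell write.
 */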
/* called with rtnl_lock */
int bnx2x_change_mac_addr(struct net_device *dev, void *p)
{
    struct sockaddr *addr = p;
    struct bnx2x *bp = netdev_priv(dev);

    if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
        return -EINVAL;

    memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
    if (netif_running(dev))
        bnx2x_set_eth_mac(bp, 1);

    return 0;
}
int bnx2x_setup_irqs(struct bnx2x *bp)
{
    int rc = 0;

    if (bp->flags & USING_MSIX_FLAG) {
        rc = bnx2x_req_msix_irqs(bp);
        if (rc)
            return rc;
    } else {
        rc = bnx2x_req_irq(bp);
        if (rc) {
            BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
            return rc;
        }
        if (bp->flags & USING_MSI_FLAG) {
            bp->dev->irq = bp->pdev->irq;
            netdev_info(bp->dev, "using MSI IRQ %d\n",
                        bp->pdev->irq);
        }
    }

    return 0;
}
void bnx2x_free_mem_bp(struct bnx2x *bp)
{
    kfree(bp->fp);
    kfree(bp->msix_table);
    kfree(bp->ilt);
}

int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
{
    struct bnx2x_fastpath *fp;
    struct msix_entry *tbl;
    struct bnx2x_ilt *ilt;

    /* fp array */
    fp = kzalloc(L2_FP_COUNT(bp->l2_cid_count)*sizeof(*fp), GFP_KERNEL);
    if (!fp)
        goto alloc_err;
    bp->fp = fp;

    /* msix table */
    tbl = kzalloc((bp->l2_cid_count + 1) * sizeof(*tbl),
                  GFP_KERNEL);
    if (!tbl)
        goto alloc_err;
    bp->msix_table = tbl;

    /* ilt */
    ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
    if (!ilt)
        goto alloc_err;
    bp->ilt = ilt;

    return 0;
alloc_err:
    bnx2x_free_mem_bp(bp);
    return -ENOMEM;
}
/* called with rtnl_lock */
int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
{
    struct bnx2x *bp = netdev_priv(dev);
    int rc = 0;

    if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
        printk(KERN_ERR "Handling parity error recovery. Try again later\n");
        return -EAGAIN;
    }

    if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
        ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
        return -EINVAL;

    /* This does not race with packet allocation
     * because the actual alloc size is
     * only updated as part of load
     */
    dev->mtu = new_mtu;

    if (netif_running(dev)) {
        bnx2x_nic_unload(bp, UNLOAD_NORMAL);
        rc = bnx2x_nic_load(bp, LOAD_NORMAL);
    }

    return rc;
}
void bnx2x_tx_timeout(struct net_device *dev)
{
    struct bnx2x *bp = netdev_priv(dev);

#ifdef BNX2X_STOP_ON_ERROR
    if (!bp->panic)
        bnx2x_panic();
#endif
    /* This allows the netif to be shutdown gracefully before resetting */
    schedule_delayed_work(&bp->reset_task, 0);
}

/* called with rtnl_lock */
void bnx2x_vlan_rx_register(struct net_device *dev,
                            struct vlan_group *vlgrp)
{
    struct bnx2x *bp = netdev_priv(dev);

    bp->vlgrp = vlgrp;
}
int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
    struct net_device *dev = pci_get_drvdata(pdev);
    struct bnx2x *bp;

    if (!dev) {
        dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
        return -ENODEV;
    }
    bp = netdev_priv(dev);

    rtnl_lock();

    pci_save_state(pdev);

    if (!netif_running(dev)) {
        rtnl_unlock();
        return 0;
    }

    netif_device_detach(dev);

    bnx2x_nic_unload(bp, UNLOAD_CLOSE);

    bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

    rtnl_unlock();

    return 0;
}

int bnx2x_resume(struct pci_dev *pdev)
{
    struct net_device *dev = pci_get_drvdata(pdev);
    struct bnx2x *bp;
    int rc;

    if (!dev) {
        dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
        return -ENODEV;
    }
    bp = netdev_priv(dev);

    if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
        printk(KERN_ERR "Handling parity error recovery. Try again later\n");
        return -EAGAIN;
    }

    rtnl_lock();

    pci_restore_state(pdev);

    if (!netif_running(dev)) {
        rtnl_unlock();
        return 0;
    }

    bnx2x_set_power_state(bp, PCI_D0);
    netif_device_attach(dev);

    /* Since the chip was reset, clear the FW sequence number */
    bp->fw_seq = 0;
    rc = bnx2x_nic_load(bp, LOAD_OPEN);

    rtnl_unlock();

    return rc;
}