1 /* bnx2x_cmn.c: Broadcom Everest network driver.
3 * Copyright (c) 2007-2010 Broadcom Corporation
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath and fastpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
18 #include <linux/etherdevice.h>
19 #include <linux/if_vlan.h>
22 #include <net/ip6_checksum.h>
23 #include <linux/firmware.h>
24 #include "bnx2x_cmn.h"
26 #include "bnx2x_init.h"
29 /* free skb in the packet ring at pos idx
30 * return idx of last bd freed
32 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
35 struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
36 struct eth_tx_start_bd *tx_start_bd;
37 struct eth_tx_bd *tx_data_bd;
38 struct sk_buff *skb = tx_buf->skb;
39 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
42 /* prefetch skb end pointer to speedup dev_kfree_skb() */
45 DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
49 DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
50 tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
51 dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
52 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);
54 nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
55 #ifdef BNX2X_STOP_ON_ERROR
56 if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
57 BNX2X_ERR("BAD nbd!\n");
61 new_cons = nbd + tx_buf->first_bd;
64 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
66 /* Skip a parse bd... */
68 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
70 /* ...and the TSO split header bd since they have no mapping */
71 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
73 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
79 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
80 tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
81 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
82 BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
84 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
96 int bnx2x_tx_int(struct bnx2x_fastpath *fp)
98 struct bnx2x *bp = fp->bp;
99 struct netdev_queue *txq;
100 u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
102 #ifdef BNX2X_STOP_ON_ERROR
103 if (unlikely(bp->panic))
107 txq = netdev_get_tx_queue(bp->dev, fp->index);
108 hw_cons = le16_to_cpu(*fp->tx_cons_sb);
109 sw_cons = fp->tx_pkt_cons;
111 while (sw_cons != hw_cons) {
114 pkt_cons = TX_BD(sw_cons);
116 DP(NETIF_MSG_TX_DONE, "queue[%d]: hw_cons %u sw_cons %u "
118 fp->index, hw_cons, sw_cons, pkt_cons);
120 bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
124 fp->tx_pkt_cons = sw_cons;
125 fp->tx_bd_cons = bd_cons;
127 /* Need to make the tx_bd_cons update visible to start_xmit()
128 * before checking for netif_tx_queue_stopped(). Without the
129 * memory barrier, there is a small possibility that
130 * start_xmit() will miss it and cause the queue to be stopped
135 if (unlikely(netif_tx_queue_stopped(txq))) {
136 /* Taking tx_lock() is needed to prevent reenabling the queue
137 * while it's empty. This could have happened if rx_action() gets
138 * suspended in bnx2x_tx_int() after the condition before
139 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
141 * stops the queue->sees fresh tx_bd_cons->releases the queue->
142 * sends some packets consuming the whole queue again->
146 __netif_tx_lock(txq, smp_processor_id());
148 if ((netif_tx_queue_stopped(txq)) &&
149 (bp->state == BNX2X_STATE_OPEN) &&
150 (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
151 netif_tx_wake_queue(txq);
153 __netif_tx_unlock(txq);
158 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
161 u16 last_max = fp->last_max_sge;
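	/* Compare as a signed 16-bit difference so the check still works
	 * when the SGE index wraps around the end of the ring.
	 */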
163 if (SUB_S16(idx, last_max) > 0)
164 fp->last_max_sge = idx;
167 static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
168 struct eth_fast_path_rx_cqe *fp_cqe)
170 struct bnx2x *bp = fp->bp;
171 u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
172 le16_to_cpu(fp_cqe->len_on_bd)) >>
174 u16 last_max, last_elem, first_elem;
181 /* First mark all used pages */
182 for (i = 0; i < sge_len; i++)
183 SGE_MASK_CLEAR_BIT(fp,
184 RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[i])));
186 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
187 sge_len - 1, le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));
189 /* Here we assume that the last SGE index is the biggest */
190 prefetch((void *)(fp->sge_mask));
191 bnx2x_update_last_max_sge(fp,
192 le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));
194 last_max = RX_SGE(fp->last_max_sge);
195 last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
196 first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;
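	/* The SGE mask is an array of 64-bit words; the producer below is only
	 * advanced across words whose pages have all been consumed.
	 */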
198 /* If ring is not full */
199 if (last_elem + 1 != first_elem)
202 /* Now update the prod */
203 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
204 if (likely(fp->sge_mask[i]))
207 fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
208 delta += RX_SGE_MASK_ELEM_SZ;
212 fp->rx_sge_prod += delta;
213 /* clear page-end entries */
214 bnx2x_clear_sge_mask_next_elems(fp);
217 DP(NETIF_MSG_RX_STATUS,
218 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
219 fp->last_max_sge, fp->rx_sge_prod);
222 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
223 struct sk_buff *skb, u16 cons, u16 prod)
225 struct bnx2x *bp = fp->bp;
226 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
227 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
228 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
231 /* move empty skb from pool to prod and map it */
232 prod_rx_buf->skb = fp->tpa_pool[queue].skb;
233 mapping = dma_map_single(&bp->pdev->dev, fp->tpa_pool[queue].skb->data,
234 bp->rx_buf_size, DMA_FROM_DEVICE);
235 dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
237 /* move partial skb from cons to pool (don't unmap yet) */
238 fp->tpa_pool[queue] = *cons_rx_buf;
240 /* mark bin state as start - print error if current state != stop */
241 if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
242 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
244 fp->tpa_state[queue] = BNX2X_TPA_START;
246 /* point prod_bd to new skb */
247 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
248 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
250 #ifdef BNX2X_STOP_ON_ERROR
251 fp->tpa_queue_used |= (1 << queue);
252 #ifdef _ASM_GENERIC_INT_L64_H
253 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
255 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
261 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
263 struct eth_fast_path_rx_cqe *fp_cqe,
266 struct sw_rx_page *rx_pg, old_rx_pg;
267 u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
268 u32 i, frag_len, frag_size, pages;
272 frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
273 pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
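	/* frag_size is the part of the packet that did not fit on the regular
	 * RX BD; it is spread over 'pages' SGE pages collected below as frags.
	 */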
275 /* This is needed in order to enable forwarding support */
277 skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
278 max(frag_size, (u32)len_on_bd));
280 #ifdef BNX2X_STOP_ON_ERROR
281 if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
282 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
284 BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
285 fp_cqe->pkt_len, len_on_bd);
291 /* Run through the SGL and compose the fragmented skb */
292 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
294 RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[j]));
296 /* FW gives the indices of the SGE as if the ring is an array
297 (meaning that "next" element will consume 2 indices) */
298 frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
299 rx_pg = &fp->rx_page_ring[sge_idx];
302 /* If we fail to allocate a substitute page, we simply stop
303 where we are and drop the whole packet */
304 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
306 fp->eth_q_stats.rx_skb_alloc_failed++;
310 /* Unmap the page as we are going to pass it to the stack */
311 dma_unmap_page(&bp->pdev->dev,
312 dma_unmap_addr(&old_rx_pg, mapping),
313 SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
315 /* Add one frag and update the appropriate fields in the skb */
316 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
318 skb->data_len += frag_len;
319 skb->truesize += frag_len;
320 skb->len += frag_len;
322 frag_size -= frag_len;
328 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
329 u16 queue, int pad, int len, union eth_rx_cqe *cqe,
332 struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
333 struct sk_buff *skb = rx_buf->skb;
335 struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
337 /* Unmap skb in the pool anyway, as we are going to change
338 pool entry status to BNX2X_TPA_STOP even if new skb allocation fails */
340 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
341 bp->rx_buf_size, DMA_FROM_DEVICE);
343 if (likely(new_skb)) {
344 /* fix ip xsum and give it to the stack */
345 /* (no need to map the new skb) */
348 prefetch(((char *)(skb)) + L1_CACHE_BYTES);
350 #ifdef BNX2X_STOP_ON_ERROR
351 if (pad + len > bp->rx_buf_size) {
352 BNX2X_ERR("skb_put is about to fail... "
353 "pad %d len %d rx_buf_size %d\n",
354 pad, len, bp->rx_buf_size);
360 skb_reserve(skb, pad);
363 skb->protocol = eth_type_trans(skb, bp->dev);
364 skb->ip_summed = CHECKSUM_UNNECESSARY;
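		/* The IP header checksum is no longer valid for the aggregated
		 * frame, so recompute it before handing the skb up.
		 */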
369 iph = (struct iphdr *)skb->data;
371 iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
374 if (!bnx2x_fill_frag_skb(bp, fp, skb,
375 &cqe->fast_path_cqe, cqe_idx)) {
376 if ((le16_to_cpu(cqe->fast_path_cqe.
377 pars_flags.flags) & PARSING_FLAGS_VLAN))
378 __vlan_hwaccel_put_tag(skb,
379 le16_to_cpu(cqe->fast_path_cqe.
381 napi_gro_receive(&fp->napi, skb);
383 DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
384 " - dropping packet!\n");
389 /* put new skb in bin */
390 fp->tpa_pool[queue].skb = new_skb;
393 /* else drop the packet and keep the buffer in the bin */
394 DP(NETIF_MSG_RX_STATUS,
395 "Failed to allocate new skb - dropping packet!\n");
396 fp->eth_q_stats.rx_skb_alloc_failed++;
399 fp->tpa_state[queue] = BNX2X_TPA_STOP;
402 /* Set Toeplitz hash value in the skb using the value from the
403 * CQE (calculated by HW).
405 static inline void bnx2x_set_skb_rxhash(struct bnx2x *bp, union eth_rx_cqe *cqe,
408 /* Set Toeplitz hash from CQE */
409 if ((bp->dev->features & NETIF_F_RXHASH) &&
410 (cqe->fast_path_cqe.status_flags &
411 ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
413 le32_to_cpu(cqe->fast_path_cqe.rss_hash_result);
416 int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
418 struct bnx2x *bp = fp->bp;
419 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
420 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
423 #ifdef BNX2X_STOP_ON_ERROR
424 if (unlikely(bp->panic))
428 /* CQ "next element" is of the size of the regular element,
429 that's why it's ok here */
430 hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
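	/* The last slot of each RCQ page holds the "next page" pointer rather
	 * than a real CQE, so step over it.
	 */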
431 if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
434 bd_cons = fp->rx_bd_cons;
435 bd_prod = fp->rx_bd_prod;
436 bd_prod_fw = bd_prod;
437 sw_comp_cons = fp->rx_comp_cons;
438 sw_comp_prod = fp->rx_comp_prod;
440 /* Memory barrier necessary as speculative reads of the rx
441 * buffer can be ahead of the index in the status block
445 DP(NETIF_MSG_RX_STATUS,
446 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
447 fp->index, hw_comp_cons, sw_comp_cons);
449 while (sw_comp_cons != hw_comp_cons) {
450 struct sw_rx_bd *rx_buf = NULL;
452 union eth_rx_cqe *cqe;
456 comp_ring_cons = RCQ_BD(sw_comp_cons);
457 bd_prod = RX_BD(bd_prod);
458 bd_cons = RX_BD(bd_cons);
460 /* Prefetch the page containing the BD descriptor
461 at the producer's index. It will be needed when a new skb is allocated */
463 prefetch((void *)(PAGE_ALIGN((unsigned long)
464 (&fp->rx_desc_ring[bd_prod])) -
467 cqe = &fp->rx_comp_ring[comp_ring_cons];
468 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
470 DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
471 " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
472 cqe_fp_flags, cqe->fast_path_cqe.status_flags,
473 le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
474 le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
475 le16_to_cpu(cqe->fast_path_cqe.pkt_len));
477 /* is this a slowpath msg? */
478 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
479 bnx2x_sp_event(fp, cqe);
482 /* this is an rx packet */
484 rx_buf = &fp->rx_buf_ring[bd_cons];
487 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
488 pad = cqe->fast_path_cqe.placement_offset;
490 /* - If CQE is marked both TPA_START and TPA_END it is a non-TPA CQE.
492 * - An FP CQE will always have the TPA_START and/or
493 * TPA_STOP flag set.
495 if ((!fp->disable_tpa) &&
496 (TPA_TYPE(cqe_fp_flags) !=
497 (TPA_TYPE_START | TPA_TYPE_END))) {
498 u16 queue = cqe->fast_path_cqe.queue_index;
500 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
501 DP(NETIF_MSG_RX_STATUS,
502 "calling tpa_start on queue %d\n",
505 bnx2x_tpa_start(fp, queue, skb,
508 /* Set Toeplitz hash for an LRO skb */
509 bnx2x_set_skb_rxhash(bp, cqe, skb);
512 } else { /* TPA_STOP */
513 DP(NETIF_MSG_RX_STATUS,
514 "calling tpa_stop on queue %d\n",
517 if (!BNX2X_RX_SUM_FIX(cqe))
518 BNX2X_ERR("STOP on none TCP "
521 /* This is the size of the linear data on this skb */
523 len = le16_to_cpu(cqe->fast_path_cqe.
525 bnx2x_tpa_stop(bp, fp, queue, pad,
526 len, cqe, comp_ring_cons);
527 #ifdef BNX2X_STOP_ON_ERROR
532 bnx2x_update_sge_prod(fp,
533 &cqe->fast_path_cqe);
538 dma_sync_single_for_device(&bp->pdev->dev,
539 dma_unmap_addr(rx_buf, mapping),
540 pad + RX_COPY_THRESH,
542 prefetch(((char *)(skb)) + L1_CACHE_BYTES);
544 /* is this an error packet? */
545 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
547 "ERROR flags %x rx packet %u\n",
548 cqe_fp_flags, sw_comp_cons);
549 fp->eth_q_stats.rx_err_discard_pkt++;
553 /* Since we don't have a jumbo ring
554 * copy small packets if mtu > 1500
556 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
557 (len <= RX_COPY_THRESH)) {
558 struct sk_buff *new_skb;
560 new_skb = netdev_alloc_skb(bp->dev,
562 if (new_skb == NULL) {
564 "ERROR packet dropped "
565 "because of alloc failure\n");
566 fp->eth_q_stats.rx_skb_alloc_failed++;
571 skb_copy_from_linear_data_offset(skb, pad,
572 new_skb->data + pad, len);
573 skb_reserve(new_skb, pad);
574 skb_put(new_skb, len);
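				/* The original skb and its DMA mapping stay on
				 * the ring; only the short copy goes up the stack.
				 */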
576 bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod);
581 if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
582 dma_unmap_single(&bp->pdev->dev,
583 dma_unmap_addr(rx_buf, mapping),
586 skb_reserve(skb, pad);
591 "ERROR packet dropped because "
592 "of alloc failure\n");
593 fp->eth_q_stats.rx_skb_alloc_failed++;
595 bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod);
599 skb->protocol = eth_type_trans(skb, bp->dev);
601 /* Set Toeplitz hash for a non-LRO skb */
602 bnx2x_set_skb_rxhash(bp, cqe, skb);
604 skb_checksum_none_assert(skb);
607 if (likely(BNX2X_RX_CSUM_OK(cqe)))
608 skb->ip_summed = CHECKSUM_UNNECESSARY;
610 fp->eth_q_stats.hw_csum_err++;
614 skb_record_rx_queue(skb, fp->index);
616 if (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
618 __vlan_hwaccel_put_tag(skb,
619 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
620 napi_gro_receive(&fp->napi, skb);
626 bd_cons = NEXT_RX_IDX(bd_cons);
627 bd_prod = NEXT_RX_IDX(bd_prod);
628 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
631 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
632 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
634 if (rx_pkt == budget)
638 fp->rx_bd_cons = bd_cons;
639 fp->rx_bd_prod = bd_prod_fw;
640 fp->rx_comp_cons = sw_comp_cons;
641 fp->rx_comp_prod = sw_comp_prod;
643 /* Update producers */
644 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
647 fp->rx_pkt += rx_pkt;
653 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
655 struct bnx2x_fastpath *fp = fp_cookie;
656 struct bnx2x *bp = fp->bp;
658 /* Return here if interrupt is disabled */
659 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
660 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
664 DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB "
665 "[fp %d fw_sd %d igusb %d]\n",
666 fp->index, fp->fw_sb_id, fp->igu_sb_id);
667 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
669 #ifdef BNX2X_STOP_ON_ERROR
670 if (unlikely(bp->panic))
674 /* Handle Rx and Tx according to MSI-X vector */
675 prefetch(fp->rx_cons_sb);
676 prefetch(fp->tx_cons_sb);
677 prefetch(&fp->sb_running_index[SM_RX_ID]);
678 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
683 /* HW Lock for shared dual port PHYs */
684 void bnx2x_acquire_phy_lock(struct bnx2x *bp)
686 mutex_lock(&bp->port.phy_mutex);
688 if (bp->port.need_hw_lock)
689 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
692 void bnx2x_release_phy_lock(struct bnx2x *bp)
694 if (bp->port.need_hw_lock)
695 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
697 mutex_unlock(&bp->port.phy_mutex);
700 void bnx2x_link_report(struct bnx2x *bp)
702 if (bp->flags & MF_FUNC_DIS) {
703 netif_carrier_off(bp->dev);
704 netdev_err(bp->dev, "NIC Link is Down\n");
708 if (bp->link_vars.link_up) {
711 if (bp->state == BNX2X_STATE_OPEN)
712 netif_carrier_on(bp->dev);
713 netdev_info(bp->dev, "NIC Link is Up, ");
715 line_speed = bp->link_vars.line_speed;
720 ((bp->mf_config[BP_VN(bp)] &
721 FUNC_MF_CFG_MAX_BW_MASK) >>
722 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
723 if (vn_max_rate < line_speed)
724 line_speed = vn_max_rate;
726 pr_cont("%d Mbps ", line_speed);
728 if (bp->link_vars.duplex == DUPLEX_FULL)
729 pr_cont("full duplex");
731 pr_cont("half duplex");
733 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
734 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
735 pr_cont(", receive ");
736 if (bp->link_vars.flow_ctrl &
738 pr_cont("& transmit ");
740 pr_cont(", transmit ");
742 pr_cont("flow control ON");
746 } else { /* link_down */
747 netif_carrier_off(bp->dev);
748 netdev_err(bp->dev, "NIC Link is Down\n");
752 /* Returns the number of actually allocated BDs */
753 static inline int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
756 struct bnx2x *bp = fp->bp;
757 u16 ring_prod, cqe_ring_prod;
760 fp->rx_comp_cons = 0;
761 cqe_ring_prod = ring_prod = 0;
762 for (i = 0; i < rx_ring_size; i++) {
763 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
764 BNX2X_ERR("was only able to allocate "
765 "%d rx skbs on queue[%d]\n", i, fp->index);
766 fp->eth_q_stats.rx_skb_alloc_failed++;
769 ring_prod = NEXT_RX_IDX(ring_prod);
770 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
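		/* NEXT_RX_IDX() skips the "next page" slots, so ring_prod must
		 * always stay ahead of the number of buffers allocated so far.
		 */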
771 WARN_ON(ring_prod <= i);
774 fp->rx_bd_prod = ring_prod;
775 /* Limit the CQE producer by the CQE ring size */
776 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
778 fp->rx_pkt = fp->rx_calls = 0;
783 static inline void bnx2x_alloc_rx_bd_ring(struct bnx2x_fastpath *fp)
785 struct bnx2x *bp = fp->bp;
786 int rx_ring_size = bp->rx_ring_size ? bp->rx_ring_size :
787 MAX_RX_AVAIL/bp->num_queues;
789 rx_ring_size = max_t(int, MIN_RX_AVAIL, rx_ring_size);
791 bnx2x_alloc_rx_bds(fp, rx_ring_size);
794 * this will generate an interrupt (to the TSTORM)
795 * and must only be done after the chip is initialized
797 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
801 void bnx2x_init_rx_rings(struct bnx2x *bp)
803 int func = BP_FUNC(bp);
804 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
805 ETH_MAX_AGGREGATION_QUEUES_E1H;
809 bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN +
810 IP_HEADER_ALIGNMENT_PADDING;
813 "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
815 for_each_queue(bp, j) {
816 struct bnx2x_fastpath *fp = &bp->fp[j];
818 if (!fp->disable_tpa) {
819 for (i = 0; i < max_agg_queues; i++) {
820 fp->tpa_pool[i].skb =
821 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
822 if (!fp->tpa_pool[i].skb) {
823 BNX2X_ERR("Failed to allocate TPA "
824 "skb pool for queue[%d] - "
825 "disabling TPA on this "
827 bnx2x_free_tpa_pool(bp, fp, i);
831 dma_unmap_addr_set((struct sw_rx_bd *)
832 &bp->fp->tpa_pool[i],
834 fp->tpa_state[i] = BNX2X_TPA_STOP;
837 /* "next page" elements initialization */
838 bnx2x_set_next_page_sgl(fp);
840 /* set SGEs bit mask */
841 bnx2x_init_sge_ring_bit_mask(fp);
843 /* Allocate SGEs and initialize the ring elements */
844 for (i = 0, ring_prod = 0;
845 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
847 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
848 BNX2X_ERR("was only able to allocate "
850 BNX2X_ERR("disabling TPA for"
852 /* Cleanup already allocated elements */
853 bnx2x_free_rx_sge_range(bp,
855 bnx2x_free_tpa_pool(bp,
861 ring_prod = NEXT_SGE_IDX(ring_prod);
864 fp->rx_sge_prod = ring_prod;
868 for_each_queue(bp, j) {
869 struct bnx2x_fastpath *fp = &bp->fp[j];
873 bnx2x_set_next_page_rx_bd(fp);
876 bnx2x_set_next_page_rx_cq(fp);
878 /* Allocate BDs and initialize BD ring */
879 bnx2x_alloc_rx_bd_ring(fp);
884 if (!CHIP_IS_E2(bp)) {
885 REG_WR(bp, BAR_USTRORM_INTMEM +
886 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
887 U64_LO(fp->rx_comp_mapping));
888 REG_WR(bp, BAR_USTRORM_INTMEM +
889 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
890 U64_HI(fp->rx_comp_mapping));
895 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
899 for_each_queue(bp, i) {
900 struct bnx2x_fastpath *fp = &bp->fp[i];
902 u16 bd_cons = fp->tx_bd_cons;
903 u16 sw_prod = fp->tx_pkt_prod;
904 u16 sw_cons = fp->tx_pkt_cons;
906 while (sw_cons != sw_prod) {
907 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
913 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
917 for_each_queue(bp, j) {
918 struct bnx2x_fastpath *fp = &bp->fp[j];
920 for (i = 0; i < NUM_RX_BD; i++) {
921 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
922 struct sk_buff *skb = rx_buf->skb;
927 dma_unmap_single(&bp->pdev->dev,
928 dma_unmap_addr(rx_buf, mapping),
929 bp->rx_buf_size, DMA_FROM_DEVICE);
934 if (!fp->disable_tpa)
935 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
936 ETH_MAX_AGGREGATION_QUEUES_E1 :
937 ETH_MAX_AGGREGATION_QUEUES_E1H);
941 void bnx2x_free_skbs(struct bnx2x *bp)
943 bnx2x_free_tx_skbs(bp);
944 bnx2x_free_rx_skbs(bp);
947 static void bnx2x_free_msix_irqs(struct bnx2x *bp)
951 free_irq(bp->msix_table[0].vector, bp->dev);
952 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
953 bp->msix_table[0].vector);
958 for_each_queue(bp, i) {
959 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
960 "state %x\n", i, bp->msix_table[i + offset].vector,
961 bnx2x_fp(bp, i, state));
963 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
967 void bnx2x_free_irq(struct bnx2x *bp)
969 if (bp->flags & USING_MSIX_FLAG)
970 bnx2x_free_msix_irqs(bp);
971 else if (bp->flags & USING_MSI_FLAG)
972 free_irq(bp->pdev->irq, bp->dev);
974 free_irq(bp->pdev->irq, bp->dev);
977 int bnx2x_enable_msix(struct bnx2x *bp)
979 int msix_vec = 0, i, rc, req_cnt;
981 bp->msix_table[msix_vec].entry = msix_vec;
982 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n",
983 bp->msix_table[0].entry);
987 bp->msix_table[msix_vec].entry = msix_vec;
988 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d (CNIC)\n",
989 bp->msix_table[msix_vec].entry, bp->msix_table[msix_vec].entry);
992 for_each_queue(bp, i) {
993 bp->msix_table[msix_vec].entry = msix_vec;
994 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
995 "(fastpath #%u)\n", msix_vec, msix_vec, i);
999 req_cnt = BNX2X_NUM_QUEUES(bp) + CNIC_CONTEXT_USE + 1;
1001 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], req_cnt);
1004 * reconfigure the number of tx/rx queues according to the available MSI-X vectors
1007 if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
1008 /* how many fewer vectors will we have? */
1009 int diff = req_cnt - rc;
1012 "Trying to use less MSI-X vectors: %d\n", rc);
1014 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
1018 "MSI-X is not attainable rc %d\n", rc);
1022 * decrease number of queues by number of unallocated entries
1024 bp->num_queues -= diff;
1026 DP(NETIF_MSG_IFUP, "New queue configuration set: %d\n",
1029 /* fall back to INTx if not enough memory */
1031 bp->flags |= DISABLE_MSI_FLAG;
1032 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
1036 bp->flags |= USING_MSIX_FLAG;
1041 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1043 int i, rc, offset = 1;
1045 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
1046 bp->dev->name, bp->dev);
1048 BNX2X_ERR("request sp irq failed\n");
1055 for_each_queue(bp, i) {
1056 struct bnx2x_fastpath *fp = &bp->fp[i];
1057 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1060 rc = request_irq(bp->msix_table[offset].vector,
1061 bnx2x_msix_fp_int, 0, fp->name, fp);
1063 BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
1064 bnx2x_free_msix_irqs(bp);
1069 fp->state = BNX2X_FP_STATE_IRQ;
1072 i = BNX2X_NUM_QUEUES(bp);
1073 offset = 1 + CNIC_CONTEXT_USE;
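	/* Vector 0 serves the slowpath, the CNIC vector(s) come next, and the
	 * remaining vectors map one-to-one onto the fastpath queues.
	 */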
1074 netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d"
1076 bp->msix_table[0].vector,
1077 0, bp->msix_table[offset].vector,
1078 i - 1, bp->msix_table[offset + i - 1].vector);
1083 int bnx2x_enable_msi(struct bnx2x *bp)
1087 rc = pci_enable_msi(bp->pdev);
1089 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
1092 bp->flags |= USING_MSI_FLAG;
1097 static int bnx2x_req_irq(struct bnx2x *bp)
1099 unsigned long flags;
1102 if (bp->flags & USING_MSI_FLAG)
1105 flags = IRQF_SHARED;
1107 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
1108 bp->dev->name, bp->dev);
1110 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
1115 static void bnx2x_napi_enable(struct bnx2x *bp)
1119 for_each_queue(bp, i)
1120 napi_enable(&bnx2x_fp(bp, i, napi));
1123 static void bnx2x_napi_disable(struct bnx2x *bp)
1127 for_each_queue(bp, i)
1128 napi_disable(&bnx2x_fp(bp, i, napi));
1131 void bnx2x_netif_start(struct bnx2x *bp)
1135 intr_sem = atomic_dec_and_test(&bp->intr_sem);
1136 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
1139 if (netif_running(bp->dev)) {
1140 bnx2x_napi_enable(bp);
1141 bnx2x_int_enable(bp);
1142 if (bp->state == BNX2X_STATE_OPEN)
1143 netif_tx_wake_all_queues(bp->dev);
1148 void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1150 bnx2x_int_disable_sync(bp, disable_hw);
1151 bnx2x_napi_disable(bp);
1152 netif_tx_disable(bp->dev);
1155 void bnx2x_set_num_queues(struct bnx2x *bp)
1157 switch (bp->multi_mode) {
1158 case ETH_RSS_MODE_DISABLED:
1161 case ETH_RSS_MODE_REGULAR:
1162 bp->num_queues = bnx2x_calc_num_queues(bp);
1171 static void bnx2x_release_firmware(struct bnx2x *bp)
1173 kfree(bp->init_ops_offsets);
1174 kfree(bp->init_ops);
1175 kfree(bp->init_data);
1176 release_firmware(bp->firmware);
1179 /* must be called with rtnl_lock */
1180 int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
1185 /* Set init arrays */
1186 rc = bnx2x_init_firmware(bp);
1188 BNX2X_ERR("Error loading firmware\n");
1192 #ifdef BNX2X_STOP_ON_ERROR
1193 if (unlikely(bp->panic))
1197 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
1199 /* must be called before memory allocation and HW init */
1200 bnx2x_ilt_set_info(bp);
1202 if (bnx2x_alloc_mem(bp))
1205 netif_set_real_num_tx_queues(bp->dev, bp->num_queues);
1206 rc = netif_set_real_num_rx_queues(bp->dev, bp->num_queues);
1208 BNX2X_ERR("Unable to update real_num_rx_queues\n");
1212 for_each_queue(bp, i)
1213 bnx2x_fp(bp, i, disable_tpa) =
1214 ((bp->flags & TPA_ENABLE_FLAG) == 0);
1216 bnx2x_napi_enable(bp);
1218 /* Send LOAD_REQUEST command to MCP
1219 Returns the type of LOAD command:
1220 if it is the first port to be initialized,
1221 common blocks should be initialized; otherwise they should not */
1223 if (!BP_NOMCP(bp)) {
1224 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0);
1226 BNX2X_ERR("MCP response failure, aborting\n");
1230 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
1231 rc = -EBUSY; /* other port in diagnostic mode */
1236 int path = BP_PATH(bp);
1237 int port = BP_PORT(bp);
1239 DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
1240 path, load_count[path][0], load_count[path][1],
1241 load_count[path][2]);
1242 load_count[path][0]++;
1243 load_count[path][1 + port]++;
1244 DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
1245 path, load_count[path][0], load_count[path][1],
1246 load_count[path][2]);
1247 if (load_count[path][0] == 1)
1248 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
1249 else if (load_count[path][1 + port] == 1)
1250 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
1252 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
1255 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
1256 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
1257 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
1261 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
1264 rc = bnx2x_init_hw(bp, load_code);
1266 BNX2X_ERR("HW init failed, aborting\n");
1267 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
1271 /* Connect to IRQs */
1272 rc = bnx2x_setup_irqs(bp);
1274 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
1278 /* Setup NIC internals and enable interrupts */
1279 bnx2x_nic_init(bp, load_code);
1281 if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
1282 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
1283 (bp->common.shmem2_base))
1284 SHMEM2_WR(bp, dcc_support,
1285 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
1286 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
1288 /* Send LOAD_DONE command to MCP */
1289 if (!BP_NOMCP(bp)) {
1290 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
1292 BNX2X_ERR("MCP response failure, aborting\n");
1298 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
1300 rc = bnx2x_func_start(bp);
1302 BNX2X_ERR("Function start failed!\n");
1303 #ifndef BNX2X_STOP_ON_ERROR
1311 rc = bnx2x_setup_client(bp, &bp->fp[0], 1 /* Leading */);
1313 BNX2X_ERR("Setup leading failed!\n");
1314 #ifndef BNX2X_STOP_ON_ERROR
1322 if (!CHIP_IS_E1(bp) &&
1323 (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED)) {
1324 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
1325 bp->flags |= MF_FUNC_DIS;
1329 /* Enable Timer scan */
1330 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
1333 for_each_nondefault_queue(bp, i) {
1334 rc = bnx2x_setup_client(bp, &bp->fp[i], 0);
1343 /* Now that the Clients are configured we are ready to work */
1344 bp->state = BNX2X_STATE_OPEN;
1346 bnx2x_set_eth_mac(bp, 1);
1349 bnx2x_initial_phy_init(bp, load_mode);
1351 /* Start fast path */
1352 switch (load_mode) {
1354 /* Tx queues should only be re-enabled */
1355 netif_tx_wake_all_queues(bp->dev);
1356 /* Initialize the receive filter. */
1357 bnx2x_set_rx_mode(bp->dev);
1361 netif_tx_start_all_queues(bp->dev);
1362 smp_mb__after_clear_bit();
1363 /* Initialize the receive filter. */
1364 bnx2x_set_rx_mode(bp->dev);
1368 /* Initialize the receive filter. */
1369 bnx2x_set_rx_mode(bp->dev);
1370 bp->state = BNX2X_STATE_DIAG;
1378 bnx2x__link_status_update(bp);
1380 /* start the timer */
1381 mod_timer(&bp->timer, jiffies + bp->current_interval);
1384 bnx2x_setup_cnic_irq_info(bp);
1385 if (bp->state == BNX2X_STATE_OPEN)
1386 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
1388 bnx2x_inc_load_cnt(bp);
1390 bnx2x_release_firmware(bp);
1396 /* Disable Timer scan */
1397 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0);
1400 bnx2x_int_disable_sync(bp, 1);
1402 /* Free SKBs, SGEs, TPA pool and driver internals */
1403 bnx2x_free_skbs(bp);
1404 for_each_queue(bp, i)
1405 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
1410 if (!BP_NOMCP(bp)) {
1411 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
1412 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
1417 bnx2x_napi_disable(bp);
1421 bnx2x_release_firmware(bp);
1426 /* must be called with rtnl_lock */
1427 int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
1431 if (bp->state == BNX2X_STATE_CLOSED) {
1432 /* Interface has been removed - nothing to recover */
1433 bp->recovery_state = BNX2X_RECOVERY_DONE;
1435 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
1442 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
1444 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
1446 /* Set "drop all" */
1447 bp->rx_mode = BNX2X_RX_MODE_NONE;
1448 bnx2x_set_storm_rx_mode(bp);
1451 bnx2x_tx_disable(bp);
1453 del_timer_sync(&bp->timer);
1455 SHMEM_WR(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb,
1456 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
1458 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
1460 /* Cleanup the chip if needed */
1461 if (unload_mode != UNLOAD_RECOVERY)
1462 bnx2x_chip_cleanup(bp, unload_mode);
1464 /* Disable HW interrupts, NAPI and Tx */
1465 bnx2x_netif_stop(bp, 1);
1473 /* Free SKBs, SGEs, TPA pool and driver internals */
1474 bnx2x_free_skbs(bp);
1475 for_each_queue(bp, i)
1476 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
1480 bp->state = BNX2X_STATE_CLOSED;
1482 /* The last driver must disable a "close the gate" if there is no
1483 * parity attention or "process kill" pending.
1485 if ((!bnx2x_dec_load_cnt(bp)) && (!bnx2x_chk_parity_attn(bp)) &&
1486 bnx2x_reset_is_done(bp))
1487 bnx2x_disable_close_the_gate(bp);
1489 /* Reset the MCP mailbox sequence if there is an ongoing recovery */
1490 if (unload_mode == UNLOAD_RECOVERY)
1496 int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
1500 /* If there is no power capability, silently succeed */
1502 DP(NETIF_MSG_HW, "No power capability. Breaking.\n");
1506 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
1510 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
1511 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
1512 PCI_PM_CTRL_PME_STATUS));
1514 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
1515 /* delay required during transition out of D3hot */
1520 /* If there are other clients above, don't
1521 shut down the power */
1522 if (atomic_read(&bp->pdev->enable_cnt) != 1)
1524 /* Don't shut down the power for emulation and FPGA */
1525 if (CHIP_REV_IS_SLOW(bp))
1528 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
1532 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
1534 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
1537 /* No more memory access after this point until
1538 * device is brought back to D0.
1549 * net_device service functions
1551 int bnx2x_poll(struct napi_struct *napi, int budget)
1554 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
1556 struct bnx2x *bp = fp->bp;
1559 #ifdef BNX2X_STOP_ON_ERROR
1560 if (unlikely(bp->panic)) {
1561 napi_complete(napi);
1566 if (bnx2x_has_tx_work(fp))
1569 if (bnx2x_has_rx_work(fp)) {
1570 work_done += bnx2x_rx_int(fp, budget - work_done);
1572 /* must not complete if we consumed full budget */
1573 if (work_done >= budget)
1577 /* Fall out from the NAPI loop if needed */
1578 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
1579 bnx2x_update_fpsb_idx(fp);
1580 /* bnx2x_has_rx_work() reads the status block,
1581 * thus we need to ensure that status block indices
1582 * have been actually read (bnx2x_update_fpsb_idx)
1583 * prior to this check (bnx2x_has_rx_work) so that
1584 * we won't write the "newer" value of the status block
1585 * to IGU (if there was a DMA right after
1586 * bnx2x_has_rx_work and if there is no rmb, the memory
1587 * reading (bnx2x_update_fpsb_idx) may be postponed
1588 * to right before bnx2x_ack_sb). In this case there
1589 * will never be another interrupt until there is
1590 * another update of the status block, while there
1591 * is still unhandled work.
1595 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
1596 napi_complete(napi);
1597 /* Re-enable interrupts */
1599 "Update index to %d\n", fp->fp_hc_idx);
1600 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
1601 le16_to_cpu(fp->fp_hc_idx),
1611 /* we split the first BD into headers and data BDs
1612 * to ease the pain of our fellow microcode engineers
1613 * we use one mapping for both BDs
1614 * So far this has only been observed to happen
1615 * in Other Operating Systems(TM)
1617 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
1618 struct bnx2x_fastpath *fp,
1619 struct sw_tx_bd *tx_buf,
1620 struct eth_tx_start_bd **tx_bd, u16 hlen,
1621 u16 bd_prod, int nbd)
1623 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
1624 struct eth_tx_bd *d_tx_bd;
1626 int old_len = le16_to_cpu(h_tx_bd->nbytes);
1628 /* first fix first BD */
1629 h_tx_bd->nbd = cpu_to_le16(nbd);
1630 h_tx_bd->nbytes = cpu_to_le16(hlen);
1632 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
1633 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
1634 h_tx_bd->addr_lo, h_tx_bd->nbd);
1636 /* now get a new data BD
1637 * (after the pbd) and fill it */
1638 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
1639 d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
1641 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
1642 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
1644 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1645 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1646 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
1648 /* this marks the BD as one that has no individual mapping */
1649 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
1651 DP(NETIF_MSG_TX_QUEUED,
1652 "TSO split data size is %d (%x:%x)\n",
1653 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
1656 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
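/* Fold 'fix' bytes into or out of a checksum computed by the stack: a positive
 * fix subtracts the bytes just before t_header from the sum, a negative fix
 * adds the bytes starting at t_header; the result is returned byte-swapped.
 */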
1661 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
1664 csum = (u16) ~csum_fold(csum_sub(csum,
1665 csum_partial(t_header - fix, fix, 0)));
1668 csum = (u16) ~csum_fold(csum_add(csum,
1669 csum_partial(t_header, -fix, 0)));
1671 return swab16(csum);
1674 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
1678 if (skb->ip_summed != CHECKSUM_PARTIAL)
1682 if (skb->protocol == htons(ETH_P_IPV6)) {
1684 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
1685 rc |= XMIT_CSUM_TCP;
1689 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
1690 rc |= XMIT_CSUM_TCP;
1694 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
1695 rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP);
1697 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
1698 rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6);
1703 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
1704 /* check if packet requires linearization (packet is too fragmented)
1705 no need to check fragmentation if page size > 8K (there will be no
1706 violation to FW restrictions) */
1707 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
1712 int first_bd_sz = 0;
1714 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
1715 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
1717 if (xmit_type & XMIT_GSO) {
1718 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
1719 /* Check if LSO packet needs to be copied:
1720 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
1721 int wnd_size = MAX_FETCH_BD - 3;
1722 /* Number of windows to check */
1723 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
1728 /* Headers length */
1729 hlen = (int)(skb_transport_header(skb) - skb->data) +
1732 /* Amount of data (w/o headers) on linear part of SKB*/
1733 first_bd_sz = skb_headlen(skb) - hlen;
1735 wnd_sum = first_bd_sz;
1737 /* Calculate the first sum - it's special */
1738 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
1740 skb_shinfo(skb)->frags[frag_idx].size;
1742 /* If there was data in the linear part of the skb - check it */
1743 if (first_bd_sz > 0) {
1744 if (unlikely(wnd_sum < lso_mss)) {
1749 wnd_sum -= first_bd_sz;
1752 /* Others are easier: run through the frag list and
1753 check all windows */
1754 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
1756 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
1758 if (unlikely(wnd_sum < lso_mss)) {
1763 skb_shinfo(skb)->frags[wnd_idx].size;
1766 /* in the non-LSO case, a packet that is too fragmented should always be linearized */
1773 if (unlikely(to_copy))
1774 DP(NETIF_MSG_TX_QUEUED,
1775 "Linearization IS REQUIRED for %s packet. "
1776 "num_frags %d hlen %d first_bd_sz %d\n",
1777 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
1778 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
1784 static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb,
1785 struct eth_tx_parse_bd_e2 *pbd,
1788 pbd->parsing_data |= cpu_to_le16(skb_shinfo(skb)->gso_size) <<
1789 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT;
1790 if ((xmit_type & XMIT_GSO_V6) &&
1791 (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
1792 pbd->parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
1796 * Update PBD in GSO case.
1799 * @param tx_start_bd
1803 static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
1804 struct eth_tx_parse_bd_e1x *pbd,
1807 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
1808 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
1809 pbd->tcp_flags = pbd_tcp_flags(skb);
1811 if (xmit_type & XMIT_GSO_V4) {
1812 pbd->ip_id = swab16(ip_hdr(skb)->id);
1813 pbd->tcp_pseudo_csum =
1814 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
1816 0, IPPROTO_TCP, 0));
1819 pbd->tcp_pseudo_csum =
1820 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
1821 &ipv6_hdr(skb)->daddr,
1822 0, IPPROTO_TCP, 0));
1824 pbd->global_data |= ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN;
1830 * @param tx_start_bd
1834 * @return header len
1836 static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
1837 struct eth_tx_parse_bd_e2 *pbd,
1840 pbd->parsing_data |= cpu_to_le16(tcp_hdrlen(skb)/4) <<
1841 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT;
1843 pbd->parsing_data |= cpu_to_le16(((unsigned char *)tcp_hdr(skb) -
1845 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT;
1847 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
1853 * @param tx_start_bd
1857 * @return Header length
1859 static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
1860 struct eth_tx_parse_bd_e1x *pbd,
1863 u8 hlen = (skb_network_header(skb) - skb->data) / 2;
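	/* Lengths and offsets in the E1x parsing BD are expressed in 16-bit
	 * words, hence the divisions by 2 in this function.
	 */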
1865 /* for now NS flag is not used in Linux */
1867 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
1868 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
1870 pbd->ip_hlen_w = (skb_transport_header(skb) -
1871 skb_network_header(skb)) / 2;
1873 hlen += pbd->ip_hlen_w + tcp_hdrlen(skb) / 2;
1875 pbd->total_hlen_w = cpu_to_le16(hlen);
1878 if (xmit_type & XMIT_CSUM_TCP) {
1879 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
1882 s8 fix = SKB_CS_OFF(skb); /* signed! */
1884 DP(NETIF_MSG_TX_QUEUED,
1885 "hlen %d fix %d csum before fix %x\n",
1886 le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
1888 /* HW bug: fixup the CSUM */
1889 pbd->tcp_pseudo_csum =
1890 bnx2x_csum_fix(skb_transport_header(skb),
1893 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
1894 pbd->tcp_pseudo_csum);
1900 /* called with netif_tx_lock
1901 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
1902 * netif_wake_queue()
1904 netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
1906 struct bnx2x *bp = netdev_priv(dev);
1907 struct bnx2x_fastpath *fp;
1908 struct netdev_queue *txq;
1909 struct sw_tx_bd *tx_buf;
1910 struct eth_tx_start_bd *tx_start_bd;
1911 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
1912 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
1913 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
1914 u16 pkt_prod, bd_prod;
1917 u32 xmit_type = bnx2x_xmit_type(bp, skb);
1920 __le16 pkt_size = 0;
1922 u8 mac_type = UNICAST_ADDRESS;
1924 #ifdef BNX2X_STOP_ON_ERROR
1925 if (unlikely(bp->panic))
1926 return NETDEV_TX_BUSY;
1929 fp_index = skb_get_queue_mapping(skb);
1930 txq = netdev_get_tx_queue(dev, fp_index);
1932 fp = &bp->fp[fp_index];
1934 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
1935 fp->eth_q_stats.driver_xoff++;
1936 netif_tx_stop_queue(txq);
1937 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
1938 return NETDEV_TX_BUSY;
1941 DP(NETIF_MSG_TX_QUEUED, "queue[%d]: SKB: summed %x protocol %x "
1942 "protocol(%x,%x) gso type %x xmit_type %x\n",
1943 fp_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
1944 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
1946 eth = (struct ethhdr *)skb->data;
1948 /* set flag according to packet type (UNICAST_ADDRESS is default)*/
1949 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
1950 if (is_broadcast_ether_addr(eth->h_dest))
1951 mac_type = BROADCAST_ADDRESS;
1953 mac_type = MULTICAST_ADDRESS;
1956 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
1957 /* First, check if we need to linearize the skb (due to FW
1958 restrictions). No need to check fragmentation if page size > 8K
1959 (there will be no violation to FW restrictions) */
1960 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
1961 /* Statistics of linearization */
1963 if (skb_linearize(skb) != 0) {
1964 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
1965 "silently dropping this SKB\n");
1966 dev_kfree_skb_any(skb);
1967 return NETDEV_TX_OK;
1973 Please read carefully. First we use one BD which we mark as start,
1974 then we have a parsing info BD (used for TSO or xsum),
1975 and only then we have the rest of the TSO BDs.
1976 (don't forget to mark the last one as last,
1977 and to unmap only AFTER you write to the BD ...)
1978 And above all, all pbd sizes are in words - NOT DWORDS!
1981 pkt_prod = fp->tx_pkt_prod++;
1982 bd_prod = TX_BD(fp->tx_bd_prod);
1984 /* get a tx_buf and first BD */
1985 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
1986 tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
1988 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
1989 SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_ETH_ADDR_TYPE,
1993 SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1);
1995 /* remember the first BD of the packet */
1996 tx_buf->first_bd = fp->tx_bd_prod;
2000 DP(NETIF_MSG_TX_QUEUED,
2001 "sending pkt %u @%p next_idx %u bd %u @%p\n",
2002 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
2004 if (vlan_tx_tag_present(skb)) {
2005 tx_start_bd->vlan_or_ethertype =
2006 cpu_to_le16(vlan_tx_tag_get(skb));
2007 tx_start_bd->bd_flags.as_bitfield |=
2008 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
2010 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
2012 /* turn on parsing and get a BD */
2013 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2015 if (xmit_type & XMIT_CSUM) {
2016 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
2018 if (xmit_type & XMIT_CSUM_V4)
2019 tx_start_bd->bd_flags.as_bitfield |=
2020 ETH_TX_BD_FLAGS_IP_CSUM;
2022 tx_start_bd->bd_flags.as_bitfield |=
2023 ETH_TX_BD_FLAGS_IPV6;
2025 if (!(xmit_type & XMIT_CSUM_TCP))
2026 tx_start_bd->bd_flags.as_bitfield |=
2027 ETH_TX_BD_FLAGS_IS_UDP;
2030 if (CHIP_IS_E2(bp)) {
2031 pbd_e2 = &fp->tx_desc_ring[bd_prod].parse_bd_e2;
2032 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
2033 /* Set PBD in checksum offload case */
2034 if (xmit_type & XMIT_CSUM)
2035 hlen = bnx2x_set_pbd_csum_e2(bp,
2036 skb, pbd_e2, xmit_type);
2038 pbd_e1x = &fp->tx_desc_ring[bd_prod].parse_bd_e1x;
2039 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
2040 /* Set PBD in checksum offload case */
2041 if (xmit_type & XMIT_CSUM)
2042 hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
2046 /* Map skb linear data for DMA */
2047 mapping = dma_map_single(&bp->pdev->dev, skb->data,
2048 skb_headlen(skb), DMA_TO_DEVICE);
2050 /* Setup the data pointer of the first BD of the packet */
2051 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2052 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
2053 nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
2054 tx_start_bd->nbd = cpu_to_le16(nbd);
2055 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
2056 pkt_size = tx_start_bd->nbytes;
2058 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
2059 " nbytes %d flags %x vlan %x\n",
2060 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
2061 le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
2062 tx_start_bd->bd_flags.as_bitfield,
2063 le16_to_cpu(tx_start_bd->vlan_or_ethertype));
2065 if (xmit_type & XMIT_GSO) {
2067 DP(NETIF_MSG_TX_QUEUED,
2068 "TSO packet len %d hlen %d total len %d tso size %d\n",
2069 skb->len, hlen, skb_headlen(skb),
2070 skb_shinfo(skb)->gso_size);
2072 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
2074 if (unlikely(skb_headlen(skb) > hlen))
2075 bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
2076 hlen, bd_prod, ++nbd);
2078 bnx2x_set_pbd_gso_e2(skb, pbd_e2, xmit_type);
2080 bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
2082 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
2084 /* Handle fragmented skb */
2085 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2086 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2088 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2089 tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
2090 if (total_pkt_bd == NULL)
2091 total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
2093 mapping = dma_map_page(&bp->pdev->dev, frag->page,
2095 frag->size, DMA_TO_DEVICE);
2097 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2098 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
2099 tx_data_bd->nbytes = cpu_to_le16(frag->size);
2100 le16_add_cpu(&pkt_size, frag->size);
2102 DP(NETIF_MSG_TX_QUEUED,
2103 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
2104 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
2105 le16_to_cpu(tx_data_bd->nbytes));
2108 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
2110 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2112 /* now send a tx doorbell, counting the next BD
2113 * if the packet contains or ends with it
2115 if (TX_BD_POFF(bd_prod) < nbd)
2118 if (total_pkt_bd != NULL)
2119 total_pkt_bd->total_pkt_bytes = pkt_size;
2122 DP(NETIF_MSG_TX_QUEUED,
2123 "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
2124 " tcp_flags %x xsum %x seq %u hlen %u\n",
2125 pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
2126 pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
2127 pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
2128 le16_to_cpu(pbd_e1x->total_hlen_w));
2130 DP(NETIF_MSG_TX_QUEUED,
2131 "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
2132 pbd_e2, pbd_e2->dst_mac_addr_hi, pbd_e2->dst_mac_addr_mid,
2133 pbd_e2->dst_mac_addr_lo, pbd_e2->src_mac_addr_hi,
2134 pbd_e2->src_mac_addr_mid, pbd_e2->src_mac_addr_lo,
2135 pbd_e2->parsing_data);
2136 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
2139 * Make sure that the BD data is updated before updating the producer
2140 * since FW might read the BD right after the producer is updated.
2141 * This is only applicable for weak-ordered memory model archs such
2142 * as IA-64. The following barrier is also mandatory since FW will
2143 * assume packets must have BDs.
2147 fp->tx_db.data.prod += nbd;
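	/* Write the updated producer to the doorbell; this is what tells the
	 * firmware that new BDs are ready to be fetched.
	 */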
2150 DOORBELL(bp, fp->cid, fp->tx_db.raw);
2154 fp->tx_bd_prod += nbd;
2156 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
2157 netif_tx_stop_queue(txq);
2159 /* paired memory barrier is in bnx2x_tx_int(), we have to keep
2160 * ordering of set_bit() in netif_tx_stop_queue() and the read of fp->tx_bd_cons.
2164 fp->eth_q_stats.driver_xoff++;
2165 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
2166 netif_tx_wake_queue(txq);
2170 return NETDEV_TX_OK;
2173 /* called with rtnl_lock */
2174 int bnx2x_change_mac_addr(struct net_device *dev, void *p)
2176 struct sockaddr *addr = p;
2177 struct bnx2x *bp = netdev_priv(dev);
2179 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
2182 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2183 if (netif_running(dev))
2184 bnx2x_set_eth_mac(bp, 1);
2190 int bnx2x_setup_irqs(struct bnx2x *bp)
2193 if (bp->flags & USING_MSIX_FLAG) {
2194 rc = bnx2x_req_msix_irqs(bp);
2199 rc = bnx2x_req_irq(bp);
2201 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
2204 if (bp->flags & USING_MSI_FLAG) {
2205 bp->dev->irq = bp->pdev->irq;
2206 netdev_info(bp->dev, "using MSI IRQ %d\n",
2214 void bnx2x_free_mem_bp(struct bnx2x *bp)
2217 kfree(bp->msix_table);
2221 int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
2223 struct bnx2x_fastpath *fp;
2224 struct msix_entry *tbl;
2225 struct bnx2x_ilt *ilt;
2228 fp = kzalloc(L2_FP_COUNT(bp->l2_cid_count)*sizeof(*fp), GFP_KERNEL);
2234 tbl = kzalloc((bp->l2_cid_count + 1) * sizeof(*tbl),
2238 bp->msix_table = tbl;
2241 ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
2248 bnx2x_free_mem_bp(bp);
2253 /* called with rtnl_lock */
2254 int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
2256 struct bnx2x *bp = netdev_priv(dev);
2259 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
2260 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
2264 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
2265 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
2268 /* This does not race with packet allocation
2269 * because the actual alloc size is
2270 * only updated as part of load
2274 if (netif_running(dev)) {
2275 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
2276 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
2282 void bnx2x_tx_timeout(struct net_device *dev)
2284 struct bnx2x *bp = netdev_priv(dev);
2286 #ifdef BNX2X_STOP_ON_ERROR
2290 /* This allows the netif to be shut down gracefully before resetting */
2291 schedule_delayed_work(&bp->reset_task, 0);
2294 int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
2296 struct net_device *dev = pci_get_drvdata(pdev);
2300 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
2303 bp = netdev_priv(dev);
2307 pci_save_state(pdev);
2309 if (!netif_running(dev)) {
2314 netif_device_detach(dev);
2316 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
2318 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
2325 int bnx2x_resume(struct pci_dev *pdev)
2327 struct net_device *dev = pci_get_drvdata(pdev);
2332 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
2335 bp = netdev_priv(dev);
2337 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
2338 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
2344 pci_restore_state(pdev);
2346 if (!netif_running(dev)) {
2351 bnx2x_set_power_state(bp, PCI_D0);
2352 netif_device_attach(dev);
2354 /* Since the chip was reset, clear the FW sequence number */
2356 rc = bnx2x_nic_load(bp, LOAD_OPEN);