1 /* bnx2x_cmn.c: Broadcom Everest network driver.
3 * Copyright (c) 2007-2010 Broadcom Corporation
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath and fastpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
19 #include <linux/etherdevice.h>
21 #include <linux/ipv6.h>
22 #include <net/ip6_checksum.h>
23 #include <linux/firmware.h>
24 #include "bnx2x_cmn.h"
27 #include <linux/if_vlan.h>
30 static int bnx2x_poll(struct napi_struct *napi, int budget);
32 /* free skb in the packet ring at pos idx
33 * return idx of last bd freed
35 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
38 struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
39 struct eth_tx_start_bd *tx_start_bd;
40 struct eth_tx_bd *tx_data_bd;
41 struct sk_buff *skb = tx_buf->skb;
42 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
45 /* prefetch skb end pointer to speed up dev_kfree_skb() */
48 DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
52 DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
53 tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
54 dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
55 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);
57 nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
58 #ifdef BNX2X_STOP_ON_ERROR
59 if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
60 BNX2X_ERR("BAD nbd!\n");
64 new_cons = nbd + tx_buf->first_bd;
67 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
69 /* Skip a parse bd... */
71 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
73 /* ...and the TSO split header bd since they have no mapping */
74 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
76 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
82 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
83 tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
84 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
85 BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
87 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
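/* Editor's note (not in the original source): one transmitted packet
 * occupies a run of consecutive BDs in the ring:
 *
 *   start BD -> parse BD -> [TSO split-header BD] -> frag BD ... frag BD
 *
 * start_bd->nbd counts the BDs of the packet, which is why the walk
 * above can compute new_cons as first_bd plus the nbd count and only
 * unmap the BDs that actually carry a DMA mapping. */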
99 int bnx2x_tx_int(struct bnx2x_fastpath *fp)
101 struct bnx2x *bp = fp->bp;
102 struct netdev_queue *txq;
103 u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
105 #ifdef BNX2X_STOP_ON_ERROR
106 if (unlikely(bp->panic))
110 txq = netdev_get_tx_queue(bp->dev, fp->index);
111 hw_cons = le16_to_cpu(*fp->tx_cons_sb);
112 sw_cons = fp->tx_pkt_cons;
114 while (sw_cons != hw_cons) {
117 pkt_cons = TX_BD(sw_cons);
119 /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */
121 DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
122 hw_cons, sw_cons, pkt_cons);
124 /* if (NEXT_TX_IDX(sw_cons) != hw_cons) {
126 prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
129 bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
133 fp->tx_pkt_cons = sw_cons;
134 fp->tx_bd_cons = bd_cons;
136 /* Need to make the tx_bd_cons update visible to start_xmit()
137 * before checking for netif_tx_queue_stopped(). Without the
138 * memory barrier, there is a small possibility that
139 * start_xmit() will miss it and cause the queue to be stopped
144 /* TBD need a thresh? */
145 if (unlikely(netif_tx_queue_stopped(txq))) {
146 /* Taking tx_lock() is needed to prevent reenabling the queue
147 * while it's empty. This could happen if rx_action() gets
148 * suspended in bnx2x_tx_int() after the condition before
149 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
151 * stops the queue->sees fresh tx_bd_cons->releases the queue->
152 * sends some packets consuming the whole queue again->
156 __netif_tx_lock(txq, smp_processor_id());
158 if ((netif_tx_queue_stopped(txq)) &&
159 (bp->state == BNX2X_STATE_OPEN) &&
160 (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
161 netif_tx_wake_queue(txq);
163 __netif_tx_unlock(txq);
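/* Editor's note: illustrative sketch, not part of the original driver.
 * The barrier discussed above pairs with one in bnx2x_start_xmit()
 * issued after netif_tx_stop_queue(). The generic stop/wake protocol
 * looks like this (all names below are generic placeholders):
 */
#if 0
/* xmit side */
if (avail() < NEEDED) {
	stop_queue();			/* sets the XOFF bit */
	smp_mb();			/* order set_bit vs. re-read of cons */
	if (avail() >= NEEDED)		/* completion freed space meanwhile */
		wake_queue();
}

/* completion side (this function) */
publish_new_cons();
smp_mb();				/* make new cons visible before test */
if (queue_stopped() && avail() >= NEEDED)
	wake_queue();			/* under __netif_tx_lock() */
#endif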
168 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
171 u16 last_max = fp->last_max_sge;
173 if (SUB_S16(idx, last_max) > 0)
174 fp->last_max_sge = idx;
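/* Editor's note: illustrative sketch, not part of the original driver.
 * SUB_S16() is assumed to be the usual signed-16-bit difference macro,
 * which keeps the "newer than" test correct across index wrap-around:
 */
#if 0
#include <stdint.h>
#define SUB_S16(a, b) ((int16_t)((int16_t)(a) - (int16_t)(b)))

static int sge_idx_is_newer(uint16_t idx, uint16_t last_max)
{
	/* 0x0002 is newer than 0xfffd although numerically smaller:
	 * SUB_S16(0x0002, 0xfffd) == 5 > 0, so last_max advances */
	return SUB_S16(idx, last_max) > 0;
}
#endif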
177 static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
178 struct eth_fast_path_rx_cqe *fp_cqe)
180 struct bnx2x *bp = fp->bp;
181 u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
182 le16_to_cpu(fp_cqe->len_on_bd)) >>
184 u16 last_max, last_elem, first_elem;
191 /* First mark all used pages */
192 for (i = 0; i < sge_len; i++)
193 SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));
195 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
196 sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
198 /* Here we assume that the last SGE index is the biggest */
199 prefetch((void *)(fp->sge_mask));
200 bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
202 last_max = RX_SGE(fp->last_max_sge);
203 last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
204 first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;
206 /* If ring is not full */
207 if (last_elem + 1 != first_elem)
210 /* Now update the prod */
211 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
212 if (likely(fp->sge_mask[i]))
215 fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
216 delta += RX_SGE_MASK_ELEM_SZ;
220 fp->rx_sge_prod += delta;
221 /* clear page-end entries */
222 bnx2x_clear_sge_mask_next_elems(fp);
225 DP(NETIF_MSG_RX_STATUS,
226 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
227 fp->last_max_sge, fp->rx_sge_prod);
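/* Editor's note: illustrative sketch, not part of the original driver.
 * The producer only advances over whole 64-bit mask elements whose
 * SGEs have all been returned; the first partially-used element stops
 * the scan. A standalone model of the delta computation (the element
 * size below is a stand-in for RX_SGE_MASK_ELEM_SZ):
 */
#if 0
#include <stdint.h>
#define ELEM_BITS	64

static uint16_t sge_prod_delta(uint64_t *mask, unsigned first_elem,
			       unsigned last_elem, unsigned nelem)
{
	uint16_t delta = 0;
	unsigned i;

	for (i = first_elem; i != last_elem; i = (i + 1) % nelem) {
		if (mask[i])			/* still in use: stop */
			break;
		mask[i] = ~0ULL;		/* recycle whole element */
		delta += ELEM_BITS;		/* producer moves past it */
	}
	return delta;
}
#endif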
230 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
231 struct sk_buff *skb, u16 cons, u16 prod)
233 struct bnx2x *bp = fp->bp;
234 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
235 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
236 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
239 /* move empty skb from pool to prod and map it */
240 prod_rx_buf->skb = fp->tpa_pool[queue].skb;
241 mapping = dma_map_single(&bp->pdev->dev, fp->tpa_pool[queue].skb->data,
242 bp->rx_buf_size, DMA_FROM_DEVICE);
243 dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
245 /* move partial skb from cons to pool (don't unmap yet) */
246 fp->tpa_pool[queue] = *cons_rx_buf;
248 /* mark bin state as start - print error if current state != stop */
249 if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
250 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
252 fp->tpa_state[queue] = BNX2X_TPA_START;
254 /* point prod_bd to new skb */
255 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
256 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
258 #ifdef BNX2X_STOP_ON_ERROR
259 fp->tpa_queue_used |= (1 << queue);
260 #ifdef _ASM_GENERIC_INT_L64_H
261 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
263 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
269 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
271 struct eth_fast_path_rx_cqe *fp_cqe,
274 struct sw_rx_page *rx_pg, old_rx_pg;
275 u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
276 u32 i, frag_len, frag_size, pages;
280 frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
281 pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
283 /* This is needed in order to enable forwarding support */
285 skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
286 max(frag_size, (u32)len_on_bd));
288 #ifdef BNX2X_STOP_ON_ERROR
289 if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
290 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
292 BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
293 fp_cqe->pkt_len, len_on_bd);
299 /* Run through the SGL and compose the fragmented skb */
300 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
301 u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));
303 /* FW gives the indices of the SGE as if the ring is an array
304 (meaning that "next" element will consume 2 indices) */
305 frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
306 rx_pg = &fp->rx_page_ring[sge_idx];
309 /* If we fail to allocate a substitute page, we simply stop
310 where we are and drop the whole packet */
311 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
313 fp->eth_q_stats.rx_skb_alloc_failed++;
317 /* Unmap the page as we are going to pass it to the stack */
318 dma_unmap_page(&bp->pdev->dev,
319 dma_unmap_addr(&old_rx_pg, mapping),
320 SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
322 /* Add one frag and update the appropriate fields in the skb */
323 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
325 skb->data_len += frag_len;
326 skb->truesize += frag_len;
327 skb->len += frag_len;
329 frag_size -= frag_len;
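/* Editor's note: illustrative sketch, not part of the original driver.
 * The loop above carves the non-linear part of the aggregated packet
 * (pkt_len - len_on_bd) into SGE-sized fragments; every SGL entry
 * covers up to SGE_PAGE_SIZE * PAGES_PER_SGE bytes and the last one
 * takes the remainder. A standalone model:
 */
#if 0
#include <stdint.h>

static unsigned fill_frags_model(uint32_t frag_size, uint32_t chunk)
{
	unsigned nfrags = 0;

	while (frag_size) {
		uint32_t len = frag_size < chunk ? frag_size : chunk;

		/* skb_fill_page_desc(skb, nfrags, page, 0, len); */
		frag_size -= len;
		nfrags++;
	}
	return nfrags;		/* number of SGL entries consumed */
}
#endif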
335 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
336 u16 queue, int pad, int len, union eth_rx_cqe *cqe,
339 struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
340 struct sk_buff *skb = rx_buf->skb;
342 struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
344 /* Unmap skb in the pool anyway, as we are going to change
345 pool entry status to BNX2X_TPA_STOP even if new skb allocation fails */
347 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
348 bp->rx_buf_size, DMA_FROM_DEVICE);
350 if (likely(new_skb)) {
351 /* fix ip xsum and give it to the stack */
352 /* (no need to map the new skb) */
355 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
357 int is_not_hwaccel_vlan_cqe =
358 (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
362 prefetch(((char *)(skb)) + 128);
364 #ifdef BNX2X_STOP_ON_ERROR
365 if (pad + len > bp->rx_buf_size) {
366 BNX2X_ERR("skb_put is about to fail... "
367 "pad %d len %d rx_buf_size %d\n",
368 pad, len, bp->rx_buf_size);
374 skb_reserve(skb, pad);
377 skb->protocol = eth_type_trans(skb, bp->dev);
378 skb->ip_summed = CHECKSUM_UNNECESSARY;
383 iph = (struct iphdr *)skb->data;
385 /* If there is no Rx VLAN offloading -
386 take the VLAN tag into account */
387 if (unlikely(is_not_hwaccel_vlan_cqe))
388 iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
391 iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
394 if (!bnx2x_fill_frag_skb(bp, fp, skb,
395 &cqe->fast_path_cqe, cqe_idx)) {
397 if ((bp->vlgrp != NULL) && is_vlan_cqe &&
398 (!is_not_hwaccel_vlan_cqe))
399 vlan_gro_receive(&fp->napi, bp->vlgrp,
400 le16_to_cpu(cqe->fast_path_cqe.
404 napi_gro_receive(&fp->napi, skb);
406 DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
407 " - dropping packet!\n");
412 /* put new skb in bin */
413 fp->tpa_pool[queue].skb = new_skb;
416 /* else drop the packet and keep the buffer in the bin */
417 DP(NETIF_MSG_RX_STATUS,
418 "Failed to allocate new skb - dropping packet!\n");
419 fp->eth_q_stats.rx_skb_alloc_failed++;
422 fp->tpa_state[queue] = BNX2X_TPA_STOP;
425 /* Set Toeplitz hash value in the skb using the value from the
426 * CQE (calculated by HW).
428 static inline void bnx2x_set_skb_rxhash(struct bnx2x *bp, union eth_rx_cqe *cqe,
431 /* Set Toeplitz hash from CQE */
432 if ((bp->dev->features & NETIF_F_RXHASH) &&
433 (cqe->fast_path_cqe.status_flags &
434 ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
436 le32_to_cpu(cqe->fast_path_cqe.rss_hash_result);
439 int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
441 struct bnx2x *bp = fp->bp;
442 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
443 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
446 #ifdef BNX2X_STOP_ON_ERROR
447 if (unlikely(bp->panic))
451 /* The CQ "next element" is the same size as a regular element,
452 so the index arithmetic here is safe */
453 hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
454 if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
457 bd_cons = fp->rx_bd_cons;
458 bd_prod = fp->rx_bd_prod;
459 bd_prod_fw = bd_prod;
460 sw_comp_cons = fp->rx_comp_cons;
461 sw_comp_prod = fp->rx_comp_prod;
463 /* Memory barrier necessary as speculative reads of the rx
464 * buffer can be ahead of the index in the status block
468 DP(NETIF_MSG_RX_STATUS,
469 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
470 fp->index, hw_comp_cons, sw_comp_cons);
472 while (sw_comp_cons != hw_comp_cons) {
473 struct sw_rx_bd *rx_buf = NULL;
475 union eth_rx_cqe *cqe;
479 comp_ring_cons = RCQ_BD(sw_comp_cons);
480 bd_prod = RX_BD(bd_prod);
481 bd_cons = RX_BD(bd_cons);
483 /* Prefetch the page containing the BD descriptor
484 at the producer's index. It will be needed when a new skb is allocated */
486 prefetch((void *)(PAGE_ALIGN((unsigned long)
487 (&fp->rx_desc_ring[bd_prod])) -
490 cqe = &fp->rx_comp_ring[comp_ring_cons];
491 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
493 DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
494 " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
495 cqe_fp_flags, cqe->fast_path_cqe.status_flags,
496 le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
497 le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
498 le16_to_cpu(cqe->fast_path_cqe.pkt_len));
500 /* is this a slowpath msg? */
501 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
502 bnx2x_sp_event(fp, cqe);
505 /* this is an rx packet */
507 rx_buf = &fp->rx_buf_ring[bd_cons];
510 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
511 pad = cqe->fast_path_cqe.placement_offset;
513 /* If CQE is marked both TPA_START and TPA_END
514 it is a non-TPA CQE */
515 if ((!fp->disable_tpa) &&
516 (TPA_TYPE(cqe_fp_flags) !=
517 (TPA_TYPE_START | TPA_TYPE_END))) {
518 u16 queue = cqe->fast_path_cqe.queue_index;
520 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
521 DP(NETIF_MSG_RX_STATUS,
522 "calling tpa_start on queue %d\n",
525 bnx2x_tpa_start(fp, queue, skb,
528 /* Set Toeplitz hash for an LRO skb */
529 bnx2x_set_skb_rxhash(bp, cqe, skb);
534 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
535 DP(NETIF_MSG_RX_STATUS,
536 "calling tpa_stop on queue %d\n",
539 if (!BNX2X_RX_SUM_FIX(cqe))
540 BNX2X_ERR("STOP on non-TCP "
543 /* This is the size of the linear data
545 len = le16_to_cpu(cqe->fast_path_cqe.
547 bnx2x_tpa_stop(bp, fp, queue, pad,
548 len, cqe, comp_ring_cons);
549 #ifdef BNX2X_STOP_ON_ERROR
554 bnx2x_update_sge_prod(fp,
555 &cqe->fast_path_cqe);
560 dma_sync_single_for_device(&bp->pdev->dev,
561 dma_unmap_addr(rx_buf, mapping),
562 pad + RX_COPY_THRESH,
564 prefetch(((char *)(skb)) + 128);
566 /* is this an error packet? */
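/* (sic: ETH_RX_ERROR_FALGS below matches the macro's actual spelling
 * in the driver headers) */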
567 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
569 "ERROR flags %x rx packet %u\n",
570 cqe_fp_flags, sw_comp_cons);
571 fp->eth_q_stats.rx_err_discard_pkt++;
575 /* Since we don't have a jumbo ring
576 * copy small packets if mtu > 1500
578 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
579 (len <= RX_COPY_THRESH)) {
580 struct sk_buff *new_skb;
582 new_skb = netdev_alloc_skb(bp->dev,
584 if (new_skb == NULL) {
586 "ERROR packet dropped "
587 "because of alloc failure\n");
588 fp->eth_q_stats.rx_skb_alloc_failed++;
593 skb_copy_from_linear_data_offset(skb, pad,
594 new_skb->data + pad, len);
595 skb_reserve(new_skb, pad);
596 skb_put(new_skb, len);
598 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
603 if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
604 dma_unmap_single(&bp->pdev->dev,
605 dma_unmap_addr(rx_buf, mapping),
608 skb_reserve(skb, pad);
613 "ERROR packet dropped because "
614 "of alloc failure\n");
615 fp->eth_q_stats.rx_skb_alloc_failed++;
617 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
621 skb->protocol = eth_type_trans(skb, bp->dev);
623 /* Set Toeplitz hash for a non-LRO skb */
624 bnx2x_set_skb_rxhash(bp, cqe, skb);
626 skb_checksum_none_assert(skb);
628 if (likely(BNX2X_RX_CSUM_OK(cqe)))
629 skb->ip_summed = CHECKSUM_UNNECESSARY;
631 fp->eth_q_stats.hw_csum_err++;
635 skb_record_rx_queue(skb, fp->index);
638 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
639 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
641 vlan_gro_receive(&fp->napi, bp->vlgrp,
642 le16_to_cpu(cqe->fast_path_cqe.vlan_tag), skb);
645 napi_gro_receive(&fp->napi, skb);
651 bd_cons = NEXT_RX_IDX(bd_cons);
652 bd_prod = NEXT_RX_IDX(bd_prod);
653 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
656 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
657 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
659 if (rx_pkt == budget)
663 fp->rx_bd_cons = bd_cons;
664 fp->rx_bd_prod = bd_prod_fw;
665 fp->rx_comp_cons = sw_comp_cons;
666 fp->rx_comp_prod = sw_comp_prod;
668 /* Update producers */
669 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
672 fp->rx_pkt += rx_pkt;
678 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
680 struct bnx2x_fastpath *fp = fp_cookie;
681 struct bnx2x *bp = fp->bp;
683 /* Return here if interrupt is disabled */
684 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
685 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
689 DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
690 fp->index, fp->sb_id);
691 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
693 #ifdef BNX2X_STOP_ON_ERROR
694 if (unlikely(bp->panic))
698 /* Handle Rx and Tx according to MSI-X vector */
699 prefetch(fp->rx_cons_sb);
700 prefetch(fp->tx_cons_sb);
701 prefetch(&fp->status_blk->u_status_block.status_block_index);
702 prefetch(&fp->status_blk->c_status_block.status_block_index);
703 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
709 /* HW Lock for shared dual port PHYs */
710 void bnx2x_acquire_phy_lock(struct bnx2x *bp)
712 mutex_lock(&bp->port.phy_mutex);
714 if (bp->port.need_hw_lock)
715 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
718 void bnx2x_release_phy_lock(struct bnx2x *bp)
720 if (bp->port.need_hw_lock)
721 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
723 mutex_unlock(&bp->port.phy_mutex);
726 void bnx2x_link_report(struct bnx2x *bp)
728 if (bp->flags & MF_FUNC_DIS) {
729 netif_carrier_off(bp->dev);
730 netdev_err(bp->dev, "NIC Link is Down\n");
734 if (bp->link_vars.link_up) {
737 if (bp->state == BNX2X_STATE_OPEN)
738 netif_carrier_on(bp->dev);
739 netdev_info(bp->dev, "NIC Link is Up, ");
741 line_speed = bp->link_vars.line_speed;
746 ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
747 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
748 if (vn_max_rate < line_speed)
749 line_speed = vn_max_rate;
751 pr_cont("%d Mbps ", line_speed);
753 if (bp->link_vars.duplex == DUPLEX_FULL)
754 pr_cont("full duplex");
756 pr_cont("half duplex");
758 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
759 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
760 pr_cont(", receive ");
761 if (bp->link_vars.flow_ctrl &
763 pr_cont("& transmit ");
765 pr_cont(", transmit ");
767 pr_cont("flow control ON");
771 } else { /* link_down */
772 netif_carrier_off(bp->dev);
773 netdev_err(bp->dev, "NIC Link is Down\n");
777 void bnx2x_init_rx_rings(struct bnx2x *bp)
779 int func = BP_FUNC(bp);
780 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
781 ETH_MAX_AGGREGATION_QUEUES_E1H;
782 u16 ring_prod, cqe_ring_prod;
785 bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
787 "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
789 if (bp->flags & TPA_ENABLE_FLAG) {
791 for_each_queue(bp, j) {
792 struct bnx2x_fastpath *fp = &bp->fp[j];
794 for (i = 0; i < max_agg_queues; i++) {
795 fp->tpa_pool[i].skb =
796 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
797 if (!fp->tpa_pool[i].skb) {
798 BNX2X_ERR("Failed to allocate TPA "
799 "skb pool for queue[%d] - "
800 "disabling TPA on this "
802 bnx2x_free_tpa_pool(bp, fp, i);
806 dma_unmap_addr_set((struct sw_rx_bd *)
807 &fp->tpa_pool[i],
809 fp->tpa_state[i] = BNX2X_TPA_STOP;
814 for_each_queue(bp, j) {
815 struct bnx2x_fastpath *fp = &bp->fp[j];
818 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
819 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
821 /* "next page" elements initialization */
823 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
824 struct eth_rx_sge *sge;
826 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
828 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
829 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
831 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
832 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
835 bnx2x_init_sge_ring_bit_mask(fp);
838 for (i = 1; i <= NUM_RX_RINGS; i++) {
839 struct eth_rx_bd *rx_bd;
841 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
843 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
844 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
846 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
847 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
851 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
852 struct eth_rx_cqe_next_page *nextpg;
854 nextpg = (struct eth_rx_cqe_next_page *)
855 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
857 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
858 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
860 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
861 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
864 /* Allocate SGEs and initialize the ring elements */
865 for (i = 0, ring_prod = 0;
866 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
868 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
869 BNX2X_ERR("was only able to allocate "
871 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
872 /* Cleanup already allocated elements */
873 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
874 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
879 ring_prod = NEXT_SGE_IDX(ring_prod);
881 fp->rx_sge_prod = ring_prod;
883 /* Allocate BDs and initialize BD ring */
884 fp->rx_comp_cons = 0;
885 cqe_ring_prod = ring_prod = 0;
886 for (i = 0; i < bp->rx_ring_size; i++) {
887 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
888 BNX2X_ERR("was only able to allocate "
889 "%d rx skbs on queue[%d]\n", i, j);
890 fp->eth_q_stats.rx_skb_alloc_failed++;
893 ring_prod = NEXT_RX_IDX(ring_prod);
894 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
895 WARN_ON(ring_prod <= i);
898 fp->rx_bd_prod = ring_prod;
899 /* must not have more available CQEs than BDs */
900 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
902 fp->rx_pkt = fp->rx_calls = 0;
905 * this will generate an interrupt (to the TSTORM)
906 * must only be done after chip is initialized
908 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
913 REG_WR(bp, BAR_USTRORM_INTMEM +
914 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
915 U64_LO(fp->rx_comp_mapping));
916 REG_WR(bp, BAR_USTRORM_INTMEM +
917 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
918 U64_HI(fp->rx_comp_mapping));
921 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
925 for_each_queue(bp, i) {
926 struct bnx2x_fastpath *fp = &bp->fp[i];
928 u16 bd_cons = fp->tx_bd_cons;
929 u16 sw_prod = fp->tx_pkt_prod;
930 u16 sw_cons = fp->tx_pkt_cons;
932 while (sw_cons != sw_prod) {
933 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
939 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
943 for_each_queue(bp, j) {
944 struct bnx2x_fastpath *fp = &bp->fp[j];
946 for (i = 0; i < NUM_RX_BD; i++) {
947 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
948 struct sk_buff *skb = rx_buf->skb;
953 dma_unmap_single(&bp->pdev->dev,
954 dma_unmap_addr(rx_buf, mapping),
955 bp->rx_buf_size, DMA_FROM_DEVICE);
960 if (!fp->disable_tpa)
961 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
962 ETH_MAX_AGGREGATION_QUEUES_E1 :
963 ETH_MAX_AGGREGATION_QUEUES_E1H);
967 void bnx2x_free_skbs(struct bnx2x *bp)
969 bnx2x_free_tx_skbs(bp);
970 bnx2x_free_rx_skbs(bp);
973 static void bnx2x_free_msix_irqs(struct bnx2x *bp)
977 free_irq(bp->msix_table[0].vector, bp->dev);
978 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
979 bp->msix_table[0].vector);
984 for_each_queue(bp, i) {
985 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
986 "state %x\n", i, bp->msix_table[i + offset].vector,
987 bnx2x_fp(bp, i, state));
989 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
993 void bnx2x_free_irq(struct bnx2x *bp, bool disable_only)
995 if (bp->flags & USING_MSIX_FLAG) {
997 bnx2x_free_msix_irqs(bp);
998 pci_disable_msix(bp->pdev);
999 bp->flags &= ~USING_MSIX_FLAG;
1001 } else if (bp->flags & USING_MSI_FLAG) {
1003 free_irq(bp->pdev->irq, bp->dev);
1004 pci_disable_msi(bp->pdev);
1005 bp->flags &= ~USING_MSI_FLAG;
1007 } else if (!disable_only)
1008 free_irq(bp->pdev->irq, bp->dev);
1011 static int bnx2x_enable_msix(struct bnx2x *bp)
1013 int i, rc, offset = 1;
1016 bp->msix_table[0].entry = igu_vec;
1017 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
1020 igu_vec = BP_L_ID(bp) + offset;
1021 bp->msix_table[1].entry = igu_vec;
1022 DP(NETIF_MSG_IFUP, "msix_table[1].entry = %d (CNIC)\n", igu_vec);
1025 for_each_queue(bp, i) {
1026 igu_vec = BP_L_ID(bp) + offset + i;
1027 bp->msix_table[i + offset].entry = igu_vec;
1028 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
1029 "(fastpath #%u)\n", i + offset, igu_vec, i);
1032 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
1033 BNX2X_NUM_QUEUES(bp) + offset);
1036 * reconfigure the number of tx/rx queues according to the available MSI-X vectors
1039 if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
1040 /* vectors available for FP */
1041 int fp_vec = rc - BNX2X_MSIX_VEC_FP_START;
1044 "Trying to use less MSI-X vectors: %d\n", rc);
1046 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
1050 "MSI-X is not attainable rc %d\n", rc);
1054 bp->num_queues = min(bp->num_queues, fp_vec);
1056 DP(NETIF_MSG_IFUP, "New queue configuration set: %d\n",
1059 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
1063 bp->flags |= USING_MSIX_FLAG;
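/* Editor's note: illustrative sketch, not part of the original driver.
 * The legacy pci_enable_msix() returns 0 on success, a positive count
 * of vectors the platform could grant on partial failure, or a
 * negative errno - hence the retry-with-fewer pattern above
 * (MIN_VEC and the fallback helper below are hypothetical):
 */
#if 0
rc = pci_enable_msix(pdev, table, nvec);
if (rc > 0 && rc >= MIN_VEC)
	rc = pci_enable_msix(pdev, table, rc);	/* retry with fewer */
if (rc)
	rc = fall_back_to_msi_or_intx();	/* hypothetical helper */
#endif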
1068 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1070 int i, rc, offset = 1;
1072 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
1073 bp->dev->name, bp->dev);
1075 BNX2X_ERR("request sp irq failed\n");
1082 for_each_queue(bp, i) {
1083 struct bnx2x_fastpath *fp = &bp->fp[i];
1084 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1087 rc = request_irq(bp->msix_table[i + offset].vector,
1088 bnx2x_msix_fp_int, 0, fp->name, fp);
1090 BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
1091 bnx2x_free_msix_irqs(bp);
1095 fp->state = BNX2X_FP_STATE_IRQ;
1098 i = BNX2X_NUM_QUEUES(bp);
1099 netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d"
1101 bp->msix_table[0].vector,
1102 0, bp->msix_table[offset].vector,
1103 i - 1, bp->msix_table[offset + i - 1].vector);
1108 static int bnx2x_enable_msi(struct bnx2x *bp)
1112 rc = pci_enable_msi(bp->pdev);
1114 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
1117 bp->flags |= USING_MSI_FLAG;
1122 static int bnx2x_req_irq(struct bnx2x *bp)
1124 unsigned long flags;
1127 if (bp->flags & USING_MSI_FLAG)
1130 flags = IRQF_SHARED;
1132 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
1133 bp->dev->name, bp->dev);
1135 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
1140 static void bnx2x_napi_enable(struct bnx2x *bp)
1144 for_each_queue(bp, i)
1145 napi_enable(&bnx2x_fp(bp, i, napi));
1148 static void bnx2x_napi_disable(struct bnx2x *bp)
1152 for_each_queue(bp, i)
1153 napi_disable(&bnx2x_fp(bp, i, napi));
1156 void bnx2x_netif_start(struct bnx2x *bp)
1160 intr_sem = atomic_dec_and_test(&bp->intr_sem);
1161 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
1164 if (netif_running(bp->dev)) {
1165 bnx2x_napi_enable(bp);
1166 bnx2x_int_enable(bp);
1167 if (bp->state == BNX2X_STATE_OPEN)
1168 netif_tx_wake_all_queues(bp->dev);
1173 void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1175 bnx2x_int_disable_sync(bp, disable_hw);
1176 bnx2x_napi_disable(bp);
1177 netif_tx_disable(bp->dev);
1179 static int bnx2x_set_num_queues(struct bnx2x *bp)
1183 switch (bp->int_mode) {
1187 DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
1190 /* Set number of queues according to bp->multi_mode value */
1191 bnx2x_set_num_queues_msix(bp);
1193 DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
1196 /* if we can't use MSI-X we only need one fp,
1197 * so try to enable MSI-X with the requested number of fp's
1198 * and fall back to MSI or legacy INTx with one fp
1200 rc = bnx2x_enable_msix(bp);
1202 /* failed to enable MSI-X */
1206 bp->dev->real_num_tx_queues = bp->num_queues;
1210 static void bnx2x_release_firmware(struct bnx2x *bp)
1212 kfree(bp->init_ops_offsets);
1213 kfree(bp->init_ops);
1214 kfree(bp->init_data);
1215 release_firmware(bp->firmware);
1218 /* must be called with rtnl_lock */
1219 int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
1224 /* Set init arrays */
1225 rc = bnx2x_init_firmware(bp);
1227 BNX2X_ERR("Error loading firmware\n");
1231 #ifdef BNX2X_STOP_ON_ERROR
1232 if (unlikely(bp->panic))
1236 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
1238 rc = bnx2x_set_num_queues(bp);
1240 if (bnx2x_alloc_mem(bp)) {
1241 bnx2x_free_irq(bp, true);
1245 for_each_queue(bp, i)
1246 bnx2x_fp(bp, i, disable_tpa) =
1247 ((bp->flags & TPA_ENABLE_FLAG) == 0);
1249 for_each_queue(bp, i)
1250 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
1253 bnx2x_napi_enable(bp);
1255 if (bp->flags & USING_MSIX_FLAG) {
1256 rc = bnx2x_req_msix_irqs(bp);
1258 bnx2x_free_irq(bp, true);
1262 /* Fall back to INTx if we failed to enable MSI-X due to lack of
1263 memory (in bnx2x_set_num_queues()) */
1264 if ((rc != -ENOMEM) && (bp->int_mode != INT_MODE_INTx))
1265 bnx2x_enable_msi(bp);
1267 rc = bnx2x_req_irq(bp);
1269 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
1270 bnx2x_free_irq(bp, true);
1273 if (bp->flags & USING_MSI_FLAG) {
1274 bp->dev->irq = bp->pdev->irq;
1275 netdev_info(bp->dev, "using MSI IRQ %d\n",
1280 /* Send LOAD_REQUEST command to MCP
1281 Returns the type of LOAD command:
1282 if this is the first port to be initialized,
1283 common blocks should be initialized, otherwise not
1285 if (!BP_NOMCP(bp)) {
1286 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0);
1288 BNX2X_ERR("MCP response failure, aborting\n");
1292 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
1293 rc = -EBUSY; /* other port in diagnostic mode */
1298 int port = BP_PORT(bp);
1300 DP(NETIF_MSG_IFUP, "NO MCP - load counts %d, %d, %d\n",
1301 load_count[0], load_count[1], load_count[2]);
1303 load_count[1 + port]++;
1304 DP(NETIF_MSG_IFUP, "NO MCP - new load counts %d, %d, %d\n",
1305 load_count[0], load_count[1], load_count[2]);
1306 if (load_count[0] == 1)
1307 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
1308 else if (load_count[1 + port] == 1)
1309 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
1311 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
1314 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
1315 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
1319 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
1322 rc = bnx2x_init_hw(bp, load_code);
1324 BNX2X_ERR("HW init failed, aborting\n");
1325 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
1326 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
1327 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
1331 /* Setup NIC internals and enable interrupts */
1332 bnx2x_nic_init(bp, load_code);
1334 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) &&
1335 (bp->common.shmem2_base))
1336 SHMEM2_WR(bp, dcc_support,
1337 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
1338 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
1340 /* Send LOAD_DONE command to MCP */
1341 if (!BP_NOMCP(bp)) {
1342 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
1344 BNX2X_ERR("MCP response failure, aborting\n");
1350 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
1352 rc = bnx2x_setup_leading(bp);
1354 BNX2X_ERR("Setup leading failed!\n");
1355 #ifndef BNX2X_STOP_ON_ERROR
1363 if (CHIP_IS_E1H(bp))
1364 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
1365 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
1366 bp->flags |= MF_FUNC_DIS;
1369 if (bp->state == BNX2X_STATE_OPEN) {
1371 /* Enable Timer scan */
1372 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
1374 for_each_nondefault_queue(bp, i) {
1375 rc = bnx2x_setup_multi(bp, i);
1385 bnx2x_set_eth_mac_addr_e1(bp, 1);
1387 bnx2x_set_eth_mac_addr_e1h(bp, 1);
1389 /* Set iSCSI L2 MAC */
1390 mutex_lock(&bp->cnic_mutex);
1391 if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD) {
1392 bnx2x_set_iscsi_eth_mac_addr(bp, 1);
1393 bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
1394 bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping,
1397 mutex_unlock(&bp->cnic_mutex);
1402 bnx2x_initial_phy_init(bp, load_mode);
1404 /* Start fast path */
1405 switch (load_mode) {
1407 if (bp->state == BNX2X_STATE_OPEN) {
1408 /* Tx queues should only be re-enabled */
1409 netif_tx_wake_all_queues(bp->dev);
1411 /* Initialize the receive filter. */
1412 bnx2x_set_rx_mode(bp->dev);
1416 netif_tx_start_all_queues(bp->dev);
1417 if (bp->state != BNX2X_STATE_OPEN)
1418 netif_tx_disable(bp->dev);
1419 /* Initialize the receive filter. */
1420 bnx2x_set_rx_mode(bp->dev);
1424 /* Initialize the receive filter. */
1425 bnx2x_set_rx_mode(bp->dev);
1426 bp->state = BNX2X_STATE_DIAG;
1434 bnx2x__link_status_update(bp);
1436 /* start the timer */
1437 mod_timer(&bp->timer, jiffies + bp->current_interval);
1440 bnx2x_setup_cnic_irq_info(bp);
1441 if (bp->state == BNX2X_STATE_OPEN)
1442 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
1444 bnx2x_inc_load_cnt(bp);
1446 bnx2x_release_firmware(bp);
1452 /* Disable Timer scan */
1453 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0);
1456 bnx2x_int_disable_sync(bp, 1);
1457 if (!BP_NOMCP(bp)) {
1458 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
1459 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
1462 /* Free SKBs, SGEs, TPA pool and driver internals */
1463 bnx2x_free_skbs(bp);
1464 for_each_queue(bp, i)
1465 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
1468 bnx2x_free_irq(bp, false);
1470 bnx2x_napi_disable(bp);
1471 for_each_queue(bp, i)
1472 netif_napi_del(&bnx2x_fp(bp, i, napi));
1475 bnx2x_release_firmware(bp);
1480 /* must be called with rtnl_lock */
1481 int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
1485 if (bp->state == BNX2X_STATE_CLOSED) {
1486 /* Interface has been removed - nothing to recover */
1487 bp->recovery_state = BNX2X_RECOVERY_DONE;
1489 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
1496 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
1498 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
1500 /* Set "drop all" */
1501 bp->rx_mode = BNX2X_RX_MODE_NONE;
1502 bnx2x_set_storm_rx_mode(bp);
1504 /* Disable HW interrupts, NAPI and Tx */
1505 bnx2x_netif_stop(bp, 1);
1506 netif_carrier_off(bp->dev);
1508 del_timer_sync(&bp->timer);
1509 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
1510 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
1511 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
1514 bnx2x_free_irq(bp, false);
1516 /* Cleanup the chip if needed */
1517 if (unload_mode != UNLOAD_RECOVERY)
1518 bnx2x_chip_cleanup(bp, unload_mode);
1522 /* Free SKBs, SGEs, TPA pool and driver internals */
1523 bnx2x_free_skbs(bp);
1524 for_each_queue(bp, i)
1525 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
1526 for_each_queue(bp, i)
1527 netif_napi_del(&bnx2x_fp(bp, i, napi));
1530 bp->state = BNX2X_STATE_CLOSED;
1532 /* The last driver must disable a "close the gate" if there is no
1533 * parity attention or "process kill" pending.
1535 if ((!bnx2x_dec_load_cnt(bp)) && (!bnx2x_chk_parity_attn(bp)) &&
1536 bnx2x_reset_is_done(bp))
1537 bnx2x_disable_close_the_gate(bp);
1539 /* Reset the MCP mailbox sequence if recovery is ongoing */
1540 if (unload_mode == UNLOAD_RECOVERY)
1545 int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
1549 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
1553 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
1554 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
1555 PCI_PM_CTRL_PME_STATUS));
1557 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
1558 /* delay required during transition out of D3hot */
1563 /* If there are other clients above, don't
1564 shut down the power */
1565 if (atomic_read(&bp->pdev->enable_cnt) != 1)
1567 /* Don't shut down the power for emulation and FPGA */
1568 if (CHIP_REV_IS_SLOW(bp))
1571 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
1575 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
1577 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
1580 /* No more memory access after this point until
1581 * device is brought back to D0.
1594 * net_device service functions
1597 static int bnx2x_poll(struct napi_struct *napi, int budget)
1600 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
1602 struct bnx2x *bp = fp->bp;
1605 #ifdef BNX2X_STOP_ON_ERROR
1606 if (unlikely(bp->panic)) {
1607 napi_complete(napi);
1612 if (bnx2x_has_tx_work(fp))
1615 if (bnx2x_has_rx_work(fp)) {
1616 work_done += bnx2x_rx_int(fp, budget - work_done);
1618 /* must not complete if we consumed full budget */
1619 if (work_done >= budget)
1623 /* Fall out from the NAPI loop if needed */
1624 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
1625 bnx2x_update_fpsb_idx(fp);
1626 /* bnx2x_has_rx_work() reads the status block, thus we need
1627 * to ensure that status block indices have been actually read
1628 * (bnx2x_update_fpsb_idx) prior to this check
1629 * (bnx2x_has_rx_work) so that we won't write the "newer"
1630 * value of the status block to IGU (if there was a DMA right
1631 * after bnx2x_has_rx_work and if there is no rmb, the memory
1632 * reading (bnx2x_update_fpsb_idx) may be postponed to right
1633 * before bnx2x_ack_sb). In this case there will never be
1634 * another interrupt until there is another update of the
1635 * status block, while there is still unhandled work.
1639 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
1640 napi_complete(napi);
1641 /* Re-enable interrupts */
1642 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
1643 le16_to_cpu(fp->fp_c_idx),
1645 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
1646 le16_to_cpu(fp->fp_u_idx),
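/* Editor's note: illustrative sketch, not part of the original driver.
 * The completion sequence above follows the canonical NAPI shape:
 * re-read the status block index, order that read with rmb(), re-check
 * for work, and only then complete and re-enable the interrupt
 * (generic helper names below):
 */
#if 0
if (!has_work(fp)) {
	update_sb_idx(fp);		/* read fresh status block index */
	rmb();				/* order the read vs. the re-check */
	if (!has_work(fp)) {
		napi_complete(napi);
		ack_sb_enable_int(fp);	/* write index back to the IGU */
	}
}
#endif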
1657 /* we split the first BD into headers and data BDs
1658 * to ease the pain of our fellow microcode engineers
1659 * we use one mapping for both BDs
1660 * So far this has only been observed to happen
1661 * in Other Operating Systems(TM)
1663 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
1664 struct bnx2x_fastpath *fp,
1665 struct sw_tx_bd *tx_buf,
1666 struct eth_tx_start_bd **tx_bd, u16 hlen,
1667 u16 bd_prod, int nbd)
1669 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
1670 struct eth_tx_bd *d_tx_bd;
1672 int old_len = le16_to_cpu(h_tx_bd->nbytes);
1674 /* first fix first BD */
1675 h_tx_bd->nbd = cpu_to_le16(nbd);
1676 h_tx_bd->nbytes = cpu_to_le16(hlen);
1678 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
1679 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
1680 h_tx_bd->addr_lo, h_tx_bd->nbd);
1682 /* now get a new data BD
1683 * (after the pbd) and fill it */
1684 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
1685 d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
1687 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
1688 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
1690 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1691 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1692 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
1694 /* this marks the BD as one that has no individual mapping */
1695 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
1697 DP(NETIF_MSG_TX_QUEUED,
1698 "TSO split data size is %d (%x:%x)\n",
1699 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
1702 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
1707 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
1710 csum = (u16) ~csum_fold(csum_sub(csum,
1711 csum_partial(t_header - fix, fix, 0)));
1714 csum = (u16) ~csum_fold(csum_add(csum,
1715 csum_partial(t_header, -fix, 0)));
1717 return swab16(csum);
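/* Editor's note (not in the original source): "fix" is the signed
 * distance between the real transport header and where the HW began
 * summing. For fix > 0 the HW summed "fix" extra leading bytes, whose
 * partial sum is subtracted; for fix < 0 it missed "-fix" bytes, which
 * are added back. csum_fold() then collapses the 32-bit sum and
 * swab16() returns it in the byte order the parse BD expects. */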
1720 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
1724 if (skb->ip_summed != CHECKSUM_PARTIAL)
1728 if (skb->protocol == htons(ETH_P_IPV6)) {
1730 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
1731 rc |= XMIT_CSUM_TCP;
1735 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
1736 rc |= XMIT_CSUM_TCP;
1740 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
1741 rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP);
1743 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
1744 rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6);
1749 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
1750 /* check if packet requires linearization (packet is too fragmented)
1751 no need to check fragmentation if page size > 8K (there will be no
1752 violation of FW restrictions) */
1753 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
1758 int first_bd_sz = 0;
1760 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
1761 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
1763 if (xmit_type & XMIT_GSO) {
1764 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
1765 /* Check if LSO packet needs to be copied:
1766 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
1767 int wnd_size = MAX_FETCH_BD - 3;
1768 /* Number of windows to check */
1769 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
1774 /* Headers length */
1775 hlen = (int)(skb_transport_header(skb) - skb->data) +
1778 /* Amount of data (w/o headers) in the linear part of the SKB */
1779 first_bd_sz = skb_headlen(skb) - hlen;
1781 wnd_sum = first_bd_sz;
1783 /* Calculate the first sum - it's special */
1784 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
1786 skb_shinfo(skb)->frags[frag_idx].size;
1788 /* If there was data in the linear part of the skb - check it */
1789 if (first_bd_sz > 0) {
1790 if (unlikely(wnd_sum < lso_mss)) {
1795 wnd_sum -= first_bd_sz;
1798 /* Others are easier: run through the frag list and
1799 check all windows */
1800 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
1802 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
1804 if (unlikely(wnd_sum < lso_mss)) {
1809 skb_shinfo(skb)->frags[wnd_idx].size;
1812 /* in non-LSO, a too-fragmented packet should always be linearized */
1819 if (unlikely(to_copy))
1820 DP(NETIF_MSG_TX_QUEUED,
1821 "Linearization IS REQUIRED for %s packet. "
1822 "num_frags %d hlen %d first_bd_sz %d\n",
1823 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
1824 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
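/* Editor's note: illustrative sketch, not part of the original driver.
 * The check above slides a window of wnd_size BDs over the fragment
 * sizes and flags the packet when any window carries less than one
 * MSS, i.e. when a single MSS would span more than MAX_FETCH_BD - 3
 * BDs. Simplified standalone model (the driver additionally folds the
 * linear headlen in as the first window element):
 */
#if 0
#include <stdint.h>

static int needs_linearize(const uint32_t *frag_sz, int nfrags,
			   int wnd_size, uint32_t lso_mss)
{
	uint32_t wnd_sum = 0;
	int i;

	for (i = 0; i < wnd_size && i < nfrags; i++)
		wnd_sum += frag_sz[i];
	if (nfrags >= wnd_size && wnd_sum < lso_mss)
		return 1;
	for (i = wnd_size; i < nfrags; i++) {
		wnd_sum += frag_sz[i] - frag_sz[i - wnd_size];
		if (wnd_sum < lso_mss)
			return 1;
	}
	return 0;
}
#endif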
1830 /* called with netif_tx_lock
1831 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
1832 * netif_wake_queue()
1834 netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
1836 struct bnx2x *bp = netdev_priv(dev);
1837 struct bnx2x_fastpath *fp;
1838 struct netdev_queue *txq;
1839 struct sw_tx_bd *tx_buf;
1840 struct eth_tx_start_bd *tx_start_bd;
1841 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
1842 struct eth_tx_parse_bd *pbd = NULL;
1843 u16 pkt_prod, bd_prod;
1846 u32 xmit_type = bnx2x_xmit_type(bp, skb);
1849 __le16 pkt_size = 0;
1851 u8 mac_type = UNICAST_ADDRESS;
1853 #ifdef BNX2X_STOP_ON_ERROR
1854 if (unlikely(bp->panic))
1855 return NETDEV_TX_BUSY;
1858 fp_index = skb_get_queue_mapping(skb);
1859 txq = netdev_get_tx_queue(dev, fp_index);
1861 fp = &bp->fp[fp_index];
1863 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
1864 fp->eth_q_stats.driver_xoff++;
1865 netif_tx_stop_queue(txq);
1866 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
1867 return NETDEV_TX_BUSY;
1870 DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
1871 " gso type %x xmit_type %x\n",
1872 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
1873 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
1875 eth = (struct ethhdr *)skb->data;
1877 /* set flag according to packet type (UNICAST_ADDRESS is default)*/
1878 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
1879 if (is_broadcast_ether_addr(eth->h_dest))
1880 mac_type = BROADCAST_ADDRESS;
1882 mac_type = MULTICAST_ADDRESS;
1885 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
1886 /* First, check if we need to linearize the skb (due to FW
1887 restrictions). No need to check fragmentation if page size > 8K
1888 (there will be no violation of FW restrictions) */
1889 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
1890 /* Statistics of linearization */
1892 if (skb_linearize(skb) != 0) {
1893 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
1894 "silently dropping this SKB\n");
1895 dev_kfree_skb_any(skb);
1896 return NETDEV_TX_OK;
1902 Please read carefully. First we use one BD which we mark as start,
1903 then we have a parsing info BD (used for TSO or xsum),
1904 and only then we have the rest of the TSO BDs.
1905 (don't forget to mark the last one as last,
1906 and to unmap only AFTER you write to the BD ...)
1907 And above all, all pbd sizes are in words - NOT DWORDS!
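/* Editor's note (not in the original source): the BD chain built
 * below for one TSO packet therefore looks like:
 *
 *   start BD (flags, linear headlen)    <- marked START
 *   parse BD (hlen, csum/TSO fields)    <- sizes in 16-bit words
 *   [split data BD]                     <- added when headlen > hlen
 *   frag BD x nr_frags                  <- one per page fragment
 *
 * nbd starts at nr_frags + 2 and is bumped for the split BD and for
 * a ring "next page" element crossed before the doorbell. */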
1910 pkt_prod = fp->tx_pkt_prod++;
1911 bd_prod = TX_BD(fp->tx_bd_prod);
1913 /* get a tx_buf and first BD */
1914 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
1915 tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
1917 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
1918 tx_start_bd->general_data = (mac_type <<
1919 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
1921 tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
1923 /* remember the first BD of the packet */
1924 tx_buf->first_bd = fp->tx_bd_prod;
1928 DP(NETIF_MSG_TX_QUEUED,
1929 "sending pkt %u @%p next_idx %u bd %u @%p\n",
1930 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
1933 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
1934 (bp->flags & HW_VLAN_TX_FLAG)) {
1935 tx_start_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
1936 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
1939 tx_start_bd->vlan = cpu_to_le16(pkt_prod);
1941 /* turn on parsing and get a BD */
1942 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
1943 pbd = &fp->tx_desc_ring[bd_prod].parse_bd;
1945 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
1947 if (xmit_type & XMIT_CSUM) {
1948 hlen = (skb_network_header(skb) - skb->data) / 2;
1950 /* for now NS flag is not used in Linux */
1952 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
1953 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
1955 pbd->ip_hlen = (skb_transport_header(skb) -
1956 skb_network_header(skb)) / 2;
1958 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
1960 pbd->total_hlen = cpu_to_le16(hlen);
1963 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
1965 if (xmit_type & XMIT_CSUM_V4)
1966 tx_start_bd->bd_flags.as_bitfield |=
1967 ETH_TX_BD_FLAGS_IP_CSUM;
1969 tx_start_bd->bd_flags.as_bitfield |=
1970 ETH_TX_BD_FLAGS_IPV6;
1972 if (xmit_type & XMIT_CSUM_TCP) {
1973 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
1976 s8 fix = SKB_CS_OFF(skb); /* signed! */
1978 pbd->global_data |= ETH_TX_PARSE_BD_UDP_CS_FLG;
1980 DP(NETIF_MSG_TX_QUEUED,
1981 "hlen %d fix %d csum before fix %x\n",
1982 le16_to_cpu(pbd->total_hlen), fix, SKB_CS(skb));
1984 /* HW bug: fixup the CSUM */
1985 pbd->tcp_pseudo_csum =
1986 bnx2x_csum_fix(skb_transport_header(skb),
1989 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
1990 pbd->tcp_pseudo_csum);
1994 mapping = dma_map_single(&bp->pdev->dev, skb->data,
1995 skb_headlen(skb), DMA_TO_DEVICE);
1997 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1998 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1999 nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
2000 tx_start_bd->nbd = cpu_to_le16(nbd);
2001 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
2002 pkt_size = tx_start_bd->nbytes;
2004 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
2005 " nbytes %d flags %x vlan %x\n",
2006 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
2007 le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
2008 tx_start_bd->bd_flags.as_bitfield, le16_to_cpu(tx_start_bd->vlan));
2010 if (xmit_type & XMIT_GSO) {
2012 DP(NETIF_MSG_TX_QUEUED,
2013 "TSO packet len %d hlen %d total len %d tso size %d\n",
2014 skb->len, hlen, skb_headlen(skb),
2015 skb_shinfo(skb)->gso_size);
2017 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
2019 if (unlikely(skb_headlen(skb) > hlen))
2020 bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
2021 hlen, bd_prod, ++nbd);
2023 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2024 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
2025 pbd->tcp_flags = pbd_tcp_flags(skb);
2027 if (xmit_type & XMIT_GSO_V4) {
2028 pbd->ip_id = swab16(ip_hdr(skb)->id);
2029 pbd->tcp_pseudo_csum =
2030 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
2032 0, IPPROTO_TCP, 0));
2035 pbd->tcp_pseudo_csum =
2036 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2037 &ipv6_hdr(skb)->daddr,
2038 0, IPPROTO_TCP, 0));
2040 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
2042 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
2044 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2045 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2047 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2048 tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
2049 if (total_pkt_bd == NULL)
2050 total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
2052 mapping = dma_map_page(&bp->pdev->dev, frag->page,
2054 frag->size, DMA_TO_DEVICE);
2056 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2057 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
2058 tx_data_bd->nbytes = cpu_to_le16(frag->size);
2059 le16_add_cpu(&pkt_size, frag->size);
2061 DP(NETIF_MSG_TX_QUEUED,
2062 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
2063 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
2064 le16_to_cpu(tx_data_bd->nbytes));
2067 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
2069 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2071 /* now send a tx doorbell, counting the next BD
2072 * if the packet contains or ends with it
2074 if (TX_BD_POFF(bd_prod) < nbd)
2077 if (total_pkt_bd != NULL)
2078 total_pkt_bd->total_pkt_bytes = pkt_size;
2081 DP(NETIF_MSG_TX_QUEUED,
2082 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
2083 " tcp_flags %x xsum %x seq %u hlen %u\n",
2084 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
2085 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
2086 pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
2088 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
2091 * Make sure that the BD data is updated before updating the producer
2092 * since FW might read the BD right after the producer is updated.
2093 * This is only applicable for weak-ordered memory model archs such
2094 * as IA-64. The following barrier is also mandatory since FW
2095 * assumes packets must have BDs.
2099 fp->tx_db.data.prod += nbd;
2101 DOORBELL(bp, fp->index, fp->tx_db.raw);
2105 fp->tx_bd_prod += nbd;
2107 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
2108 netif_tx_stop_queue(txq);
2110 /* paired memory barrier is in bnx2x_tx_int(), we have to keep
2111 * ordering of set_bit() in netif_tx_stop_queue() and the read of fp->tx_bd_cons */
2115 fp->eth_q_stats.driver_xoff++;
2116 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
2117 netif_tx_wake_queue(txq);
2121 return NETDEV_TX_OK;
2123 /* called with rtnl_lock */
2124 int bnx2x_change_mac_addr(struct net_device *dev, void *p)
2126 struct sockaddr *addr = p;
2127 struct bnx2x *bp = netdev_priv(dev);
2129 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
2132 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2133 if (netif_running(dev)) {
2135 bnx2x_set_eth_mac_addr_e1(bp, 1);
2137 bnx2x_set_eth_mac_addr_e1h(bp, 1);
2143 /* called with rtnl_lock */
2144 int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
2146 struct bnx2x *bp = netdev_priv(dev);
2149 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
2150 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
2154 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
2155 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
2158 /* This does not race with packet allocation
2159 * because the actual alloc size is
2160 * only updated as part of load
2164 if (netif_running(dev)) {
2165 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
2166 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
2172 void bnx2x_tx_timeout(struct net_device *dev)
2174 struct bnx2x *bp = netdev_priv(dev);
2176 #ifdef BNX2X_STOP_ON_ERROR
2180 /* This allows the netif to be shut down gracefully before resetting */
2181 schedule_delayed_work(&bp->reset_task, 0);
2185 /* called with rtnl_lock */
2186 void bnx2x_vlan_rx_register(struct net_device *dev,
2187 struct vlan_group *vlgrp)
2189 struct bnx2x *bp = netdev_priv(dev);
2193 /* Set flags according to the required capabilities */
2194 bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
2196 if (dev->features & NETIF_F_HW_VLAN_TX)
2197 bp->flags |= HW_VLAN_TX_FLAG;
2199 if (dev->features & NETIF_F_HW_VLAN_RX)
2200 bp->flags |= HW_VLAN_RX_FLAG;
2202 if (netif_running(dev))
2203 bnx2x_set_client_config(bp);
2207 int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
2209 struct net_device *dev = pci_get_drvdata(pdev);
2213 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
2216 bp = netdev_priv(dev);
2220 pci_save_state(pdev);
2222 if (!netif_running(dev)) {
2227 netif_device_detach(dev);
2229 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
2231 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
2238 int bnx2x_resume(struct pci_dev *pdev)
2240 struct net_device *dev = pci_get_drvdata(pdev);
2245 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
2248 bp = netdev_priv(dev);
2250 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
2251 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
2257 pci_restore_state(pdev);
2259 if (!netif_running(dev)) {
2264 bnx2x_set_power_state(bp, PCI_D0);
2265 netif_device_attach(dev);
2267 rc = bnx2x_nic_load(bp, LOAD_OPEN);