/* bnx2x_cmn.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2010 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 */

#include <linux/etherdevice.h>
#include <linux/ipv6.h>
#include <net/ip6_checksum.h>
#include <linux/firmware.h>
#include "bnx2x_cmn.h"

#include <linux/if_vlan.h>

static int bnx2x_poll(struct napi_struct *napi, int budget);
/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	/* prefetch skb end pointer to speed up dev_kfree_skb() */
	prefetch(&skb->end);

	DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
	   idx, tx_buf, skb);

	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
	dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
			 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);

	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
	if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif
	new_cons = nbd + tx_buf->first_bd;

	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* Skip a parse bd... */
	--nbd;
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* ...and the TSO split header bd since they have no mapping */
	if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
		--nbd;
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* now free frags */
	while (nbd > 0) {
		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
		dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
			       BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	dev_kfree_skb(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}
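/* For reference, the BD chain shape bnx2x_free_tx_pkt() walks is
 * assumed to look like this (a sketch, not a firmware-spec quote):
 *
 *	start_bd (DMA-mapped)
 *	  -> parse_bd (no mapping of its own)
 *	  [-> TSO split header bd, reusing start_bd's mapping]
 *	  -> frag bd 0 ... frag bd N (each DMA-mapped)
 *
 * nbd counts the whole chain, so every entry without its own mapping
 * is skipped (and nbd decremented) before the per-fragment unmaps.
 */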
int bnx2x_tx_int(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -1;
#endif

	txq = netdev_get_tx_queue(bp->dev, fp->index);
	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

		DP(NETIF_MSG_TX_DONE, "hw_cons %u  sw_cons %u  pkt_cons %u\n",
		   hw_cons, sw_cons, pkt_cons);

		/* if (NEXT_TX_IDX(sw_cons) != hw_cons) {
			rmb();
			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
		} */

		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* Need to make the tx_bd_cons update visible to start_xmit()
	 * before checking for netif_tx_queue_stopped(). Without the
	 * memory barrier, there is a small possibility that
	 * start_xmit() will miss it and cause the queue to be stopped
	 * forever.
	 */
	smp_mb();

	/* TBD need a thresh? */
	if (unlikely(netif_tx_queue_stopped(txq))) {
		/* Taking tx_lock() is needed to prevent re-enabling the queue
		 * while it's empty. This could have happened if rx_action()
		 * gets suspended in bnx2x_tx_int() after the condition before
		 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
		 *
		 * stops the queue->sees fresh tx_bd_cons->releases the queue->
		 * sends some packets consuming the whole queue again->
		 * stops the queue
		 */

		__netif_tx_lock(txq, smp_processor_id());

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_tx_wake_queue(txq);

		__netif_tx_unlock(txq);
	}
	return 0;
}
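/* The smp_mb()/__netif_tx_lock() pair above closes the usual
 * stop/wake race; the interleaving being guarded against is assumed
 * to be (sketch only):
 *
 *	bnx2x_start_xmit()		bnx2x_tx_int()
 *	ring full -> stop queue
 *					free BDs, update tx_bd_cons
 *					queue stopped? -> wake queue
 *	(re-check under tx_lock)
 *
 * Without the barrier the producer side could miss the new
 * tx_bd_cons; without the lock the queue could be re-enabled while
 * another start_xmit is busy filling it again.
 */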
static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}
static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
		      SGE_PAGE_SHIFT;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
		delta += RX_SGE_MASK_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}
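/* A worked example of the bookkeeping above, assuming 64-bit mask
 * elements (i.e. RX_SGE_MASK_ELEM_SZ == 64 and
 * RX_SGE_MASK_ELEM_SHIFT == 6 - values defined outside this file):
 * SGE index 200 lives in sge_mask element 200 >> 6 == 3. An element
 * that reaches all-zeroes means all 64 of its SGEs were consumed, so
 * the loop re-arms it with RX_SGE_MASK_ELEM_ONE_MASK and credits
 * rx_sge_prod with a full element's worth of entries via delta.
 */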
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;

	/* move empty skb from pool to prod and map it */
	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
	mapping = dma_map_single(&bp->pdev->dev, fp->tpa_pool[queue].skb->data,
				 bp->rx_buf_size, DMA_FROM_DEVICE);
	dma_unmap_addr_set(prod_rx_buf, mapping, mapping);

	/* move partial skb from cons to pool (don't unmap yet) */
	fp->tpa_pool[queue] = *cons_rx_buf;

	/* mark bin state as start - print error if current state != stop */
	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	fp->tpa_state[queue] = BNX2X_TPA_START;

	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef _ASM_GENERIC_INT_L64_H
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
	   fp->tpa_queue_used);
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
	   fp->tpa_queue_used);
#endif
#endif
}
static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_cqe *fp_cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;

	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
	pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
						max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
			  fp_cqe->pkt_len, len_on_bd);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		 * (meaning that the "next" element will consume 2 indices)
		 */
		frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		 * where we are and drop the whole packet
		 */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			fp->eth_q_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we are going to pass it to the stack */
		dma_unmap_page(&bp->pdev->dev,
			       dma_unmap_addr(&old_rx_pg, mapping),
			       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}
static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
	struct sk_buff *skb = rx_buf->skb;
	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

	/* Unmap skb in the pool anyway, as we are going to change
	 * pool entry status to BNX2X_TPA_STOP even if new skb allocation
	 * fails.
	 */
	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
			 bp->rx_buf_size, DMA_FROM_DEVICE);

	if (likely(new_skb)) {
		/* fix ip xsum and give it to the stack */
		/* (no need to map the new skb) */
		int is_vlan_cqe =
			(le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
			 PARSING_FLAGS_VLAN);
		int is_not_hwaccel_vlan_cqe =
			(is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));

		prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > bp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail... "
				  "pad %d  len %d  rx_buf_size %d\n",
				  pad, len, bp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad);
		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		{
			struct iphdr *iph;

			iph = (struct iphdr *)skb->data;

			/* If there is no Rx VLAN offloading -
			 * take the VLAN tag into account
			 */
			if (unlikely(is_not_hwaccel_vlan_cqe))
				iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);

			iph->check = 0;
			iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
		}

		if (!bnx2x_fill_frag_skb(bp, fp, skb,
					 &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
			if ((bp->vlgrp != NULL) && is_vlan_cqe &&
			    (!is_not_hwaccel_vlan_cqe))
				vlan_gro_receive(&fp->napi, bp->vlgrp,
						 le16_to_cpu(cqe->fast_path_cqe.
							     vlan_tag), skb);
			else
#endif
				napi_gro_receive(&fp->napi, skb);
		} else {
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");
			dev_kfree_skb(skb);
		}

		/* put new skb in bin */
		fp->tpa_pool[queue].skb = new_skb;

	} else {
		/* else drop the packet and keep the buffer in the bin */
		DP(NETIF_MSG_RX_STATUS,
		   "Failed to allocate new skb - dropping packet!\n");
		fp->eth_q_stats.rx_skb_alloc_failed++;
	}

	fp->tpa_state[queue] = BNX2X_TPA_STOP;
}
/* Set Toeplitz hash value in the skb using the value from the
 * CQE (calculated by HW).
 */
static inline void bnx2x_set_skb_rxhash(struct bnx2x *bp, union eth_rx_cqe *cqe,
					struct sk_buff *skb)
{
	/* Set Toeplitz hash from CQE */
	if ((bp->dev->features & NETIF_F_RXHASH) &&
	    (cqe->fast_path_cqe.status_flags &
	     ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
		skb->rxhash =
			le32_to_cpu(cqe->fast_path_cqe.rss_hash_result);
}
int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	 * that's why it's ok here
	 */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
	   fp->index, hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		u8 cqe_fp_flags;
		u16 len, pad;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		/* Prefetch the page containing the BD descriptor
		 * at producer's index. It will be needed when new skb is
		 * allocated.
		 */
		prefetch((void *)(PAGE_ALIGN((unsigned long)
					     (&fp->rx_desc_ring[bd_prod])) -
				  PAGE_SIZE + 1));

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

		DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
		   "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
		   le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
		   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
		   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE(cqe_fp_flags))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;

		/* this is an rx packet */
		} else {
			rx_buf = &fp->rx_buf_ring[bd_cons];
			skb = rx_buf->skb;
			prefetch(skb);
			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
			pad = cqe->fast_path_cqe.placement_offset;

			/* If CQE is marked both TPA_START and TPA_END
			 * it is a non-TPA CQE
			 */
			if ((!fp->disable_tpa) &&
			    (TPA_TYPE(cqe_fp_flags) !=
			     (TPA_TYPE_START | TPA_TYPE_END))) {
				u16 queue = cqe->fast_path_cqe.queue_index;

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_start on queue %d\n",
					   queue);

					bnx2x_tpa_start(fp, queue, skb,
							bd_cons, bd_prod);

					/* Set Toeplitz hash for an LRO skb */
					bnx2x_set_skb_rxhash(bp, cqe, skb);

					goto next_rx;
				}

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_stop on queue %d\n",
					   queue);

					if (!BNX2X_RX_SUM_FIX(cqe))
						BNX2X_ERR("STOP on non-TCP "
							  "data\n");

					/* This is the size of the linear data
					 * on this skb
					 */
					len = le16_to_cpu(cqe->fast_path_cqe.
							  len_on_bd);
					bnx2x_tpa_stop(bp, fp, queue, pad,
						    len, cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
					if (bp->panic)
						return 0;
#endif

					bnx2x_update_sge_prod(fp,
							&cqe->fast_path_cqe);
					goto next_cqe;
				}
			}

			dma_sync_single_for_device(&bp->pdev->dev,
					dma_unmap_addr(rx_buf, mapping),
						   pad + RX_COPY_THRESH,
						   DMA_FROM_DEVICE);
			prefetch(((char *)(skb)) + 128);

			/* is this an error packet? */
			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR  flags %x  rx packet %u\n",
				   cqe_fp_flags, sw_comp_cons);
				fp->eth_q_stats.rx_err_discard_pkt++;
				goto reuse_rx;
			}

			/* Since we don't have a jumbo ring
			 * copy small packets if mtu > 1500
			 */
			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
			    (len <= RX_COPY_THRESH)) {
				struct sk_buff *new_skb;

				new_skb = netdev_alloc_skb(bp->dev,
							   len + pad);
				if (new_skb == NULL) {
					DP(NETIF_MSG_RX_ERR,
					   "ERROR  packet dropped "
					   "because of alloc failure\n");
					fp->eth_q_stats.rx_skb_alloc_failed++;
					goto reuse_rx;
				}

				/* aligned copy */
				skb_copy_from_linear_data_offset(skb, pad,
						    new_skb->data + pad, len);
				skb_reserve(new_skb, pad);
				skb_put(new_skb, len);

				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

				skb = new_skb;

			} else
			if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
				dma_unmap_single(&bp->pdev->dev,
					dma_unmap_addr(rx_buf, mapping),
						 bp->rx_buf_size,
						 DMA_FROM_DEVICE);
				skb_reserve(skb, pad);
				skb_put(skb, len);

			} else {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR  packet dropped because "
				   "of alloc failure\n");
				fp->eth_q_stats.rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
				goto next_rx;
			}

			skb->protocol = eth_type_trans(skb, bp->dev);

			/* Set Toeplitz hash for a non-LRO skb */
			bnx2x_set_skb_rxhash(bp, cqe, skb);

			skb_checksum_none_assert(skb);
			if (bp->rx_csum) {
				if (likely(BNX2X_RX_CSUM_OK(cqe)))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					fp->eth_q_stats.hw_csum_err++;
			}
		}

		skb_record_rx_queue(skb, fp->index);

#ifdef BCM_VLAN
		if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
		    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
		     PARSING_FLAGS_VLAN))
			vlan_gro_receive(&fp->napi, bp->vlgrp,
				le16_to_cpu(cqe->fast_path_cqe.vlan_tag), skb);
		else
#endif
			napi_gro_receive(&fp->napi, skb);

next_rx:
		rx_buf->skb = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}
static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
	   fp->index, fp->sb_id);
	bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	/* Handle Rx and Tx according to MSI-X vector */
	prefetch(fp->rx_cons_sb);
	prefetch(fp->tx_cons_sb);
	prefetch(&fp->status_blk->u_status_block.status_block_index);
	prefetch(&fp->status_blk->c_status_block.status_block_index);
	napi_schedule(&bnx2x_fp(bp, fp->index, napi));

	return IRQ_HANDLED;
}
/* HW Lock for shared dual port PHYs */
void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
	mutex_lock(&bp->port.phy_mutex);

	if (bp->port.need_hw_lock)
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
}

void bnx2x_release_phy_lock(struct bnx2x *bp)
{
	if (bp->port.need_hw_lock)
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
}
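/* Callers are expected to bracket MDIO access with this pair, e.g.
 * (illustrative only - the callee name here is a placeholder, not a
 * function defined in this file):
 *
 *	bnx2x_acquire_phy_lock(bp);
 *	rc = bnx2x_mdio_read(bp, reg, &val);
 *	bnx2x_release_phy_lock(bp);
 */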
void bnx2x_link_report(struct bnx2x *bp)
{
	if (bp->flags & MF_FUNC_DIS) {
		netif_carrier_off(bp->dev);
		netdev_err(bp->dev, "NIC Link is Down\n");
		return;
	}

	if (bp->link_vars.link_up) {
		u16 line_speed;

		if (bp->state == BNX2X_STATE_OPEN)
			netif_carrier_on(bp->dev);
		netdev_info(bp->dev, "NIC Link is Up, ");

		line_speed = bp->link_vars.line_speed;
		if (IS_E1HMF(bp)) {
			u16 vn_max_rate;

			vn_max_rate =
				((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
				 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
			if (vn_max_rate < line_speed)
				line_speed = vn_max_rate;
		}
		pr_cont("%d Mbps ", line_speed);

		if (bp->link_vars.duplex == DUPLEX_FULL)
			pr_cont("full duplex");
		else
			pr_cont("half duplex");

		if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
				pr_cont(", receive ");
				if (bp->link_vars.flow_ctrl &
				    BNX2X_FLOW_CTRL_TX)
					pr_cont("& transmit ");
			} else {
				pr_cont(", transmit ");
			}
			pr_cont("flow control ON");
		}
		pr_cont("\n");

	} else { /* link_down */
		netif_carrier_off(bp->dev);
		netdev_err(bp->dev, "NIC Link is Down\n");
	}
}
void bnx2x_init_rx_rings(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
					      ETH_MAX_AGGREGATION_QUEUES_E1H;
	u16 ring_prod, cqe_ring_prod;
	int i, j;
	int rx_ring_size = bp->rx_ring_size ? bp->rx_ring_size :
			   MAX_RX_AVAIL/bp->num_queues;

	rx_ring_size = max_t(int, MIN_RX_AVAIL, rx_ring_size);

	bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
	DP(NETIF_MSG_IFUP,
	   "mtu %d  rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);

	if (bp->flags & TPA_ENABLE_FLAG) {

		for_each_queue(bp, j) {
			struct bnx2x_fastpath *fp = &bp->fp[j];

			for (i = 0; i < max_agg_queues; i++) {
				fp->tpa_pool[i].skb =
				   netdev_alloc_skb(bp->dev, bp->rx_buf_size);
				if (!fp->tpa_pool[i].skb) {
					BNX2X_ERR("Failed to allocate TPA "
						  "skb pool for queue[%d] - "
						  "disabling TPA on this "
						  "queue!\n", j);
					bnx2x_free_tpa_pool(bp, fp, i);
					fp->disable_tpa = 1;
					break;
				}
				dma_unmap_addr_set((struct sw_rx_bd *)
							&bp->fp->tpa_pool[i],
						   mapping, 0);
				fp->tpa_state[i] = BNX2X_TPA_STOP;
			}
		}
	}

	for_each_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		fp->rx_bd_cons = 0;
		fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
		fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;

		/* "next page" elements initialization */
		/* SGE ring */
		for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
			struct eth_rx_sge *sge;

			sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
			sge->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_sge_mapping +
					BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
			sge->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_sge_mapping +
					BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
		}

		bnx2x_init_sge_ring_bit_mask(fp);

		/* RX BD ring */
		for (i = 1; i <= NUM_RX_RINGS; i++) {
			struct eth_rx_bd *rx_bd;

			rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
			rx_bd->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
			rx_bd->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
		}

		/* CQ ring */
		for (i = 1; i <= NUM_RCQ_RINGS; i++) {
			struct eth_rx_cqe_next_page *nextpg;

			nextpg = (struct eth_rx_cqe_next_page *)
				&fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
			nextpg->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_comp_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
			nextpg->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_comp_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
		}

		/* Allocate SGEs and initialize the ring elements */
		for (i = 0, ring_prod = 0;
		     i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {

			if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
				BNX2X_ERR("was only able to allocate "
					  "%d rx sges\n", i);
				BNX2X_ERR("disabling TPA for queue[%d]\n", j);
				/* Cleanup already allocated elements */
				bnx2x_free_rx_sge_range(bp, fp, ring_prod);
				bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
				fp->disable_tpa = 1;
				ring_prod = 0;
				break;
			}
			ring_prod = NEXT_SGE_IDX(ring_prod);
		}
		fp->rx_sge_prod = ring_prod;

		/* Allocate BDs and initialize BD ring */
		fp->rx_comp_cons = 0;
		cqe_ring_prod = ring_prod = 0;
		for (i = 0; i < rx_ring_size; i++) {
			if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
				BNX2X_ERR("was only able to allocate "
					  "%d rx skbs on queue[%d]\n", i, j);
				fp->eth_q_stats.rx_skb_alloc_failed++;
				break;
			}
			ring_prod = NEXT_RX_IDX(ring_prod);
			cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
			WARN_ON(ring_prod <= i);
		}

		fp->rx_bd_prod = ring_prod;
		/* must not have more available CQEs than BDs */
		fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
					 cqe_ring_prod);
		fp->rx_pkt = fp->rx_calls = 0;

		/* Warning!
		 * this will generate an interrupt (to the TSTORM)
		 * must only be done after chip is initialized
		 */
		bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
				     fp->rx_sge_prod);

		if (j != 0)
			continue;

		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
		       U64_LO(fp->rx_comp_mapping));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
		       U64_HI(fp->rx_comp_mapping));
	}
}
static void bnx2x_free_tx_skbs(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		u16 bd_cons = fp->tx_bd_cons;
		u16 sw_prod = fp->tx_pkt_prod;
		u16 sw_cons = fp->tx_pkt_cons;

		while (sw_cons != sw_prod) {
			bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
			sw_cons++;
		}
	}
}
static void bnx2x_free_rx_skbs(struct bnx2x *bp)
{
	int i, j;

	for_each_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		for (i = 0; i < NUM_RX_BD; i++) {
			struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
			struct sk_buff *skb = rx_buf->skb;

			if (skb == NULL)
				continue;

			dma_unmap_single(&bp->pdev->dev,
					 dma_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_size, DMA_FROM_DEVICE);

			rx_buf->skb = NULL;
			dev_kfree_skb(skb);
		}
		if (!fp->disable_tpa)
			bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
					    ETH_MAX_AGGREGATION_QUEUES_E1 :
					    ETH_MAX_AGGREGATION_QUEUES_E1H);
	}
}

void bnx2x_free_skbs(struct bnx2x *bp)
{
	bnx2x_free_tx_skbs(bp);
	bnx2x_free_rx_skbs(bp);
}
static void bnx2x_free_msix_irqs(struct bnx2x *bp)
{
	int i, offset = 1;

	free_irq(bp->msix_table[0].vector, bp->dev);
	DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
	   bp->msix_table[0].vector);

#ifdef BCM_CNIC
	offset++;
#endif
	for_each_queue(bp, i) {
		DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq  "
		   "state %x\n", i, bp->msix_table[i + offset].vector,
		   bnx2x_fp(bp, i, state));

		free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
	}
}

void bnx2x_free_irq(struct bnx2x *bp, bool disable_only)
{
	if (bp->flags & USING_MSIX_FLAG) {
		if (!disable_only)
			bnx2x_free_msix_irqs(bp);
		pci_disable_msix(bp->pdev);
		bp->flags &= ~USING_MSIX_FLAG;

	} else if (bp->flags & USING_MSI_FLAG) {
		if (!disable_only)
			free_irq(bp->pdev->irq, bp->dev);
		pci_disable_msi(bp->pdev);
		bp->flags &= ~USING_MSI_FLAG;

	} else if (!disable_only)
		free_irq(bp->pdev->irq, bp->dev);
}
static int bnx2x_enable_msix(struct bnx2x *bp)
{
	int i, rc, offset = 1;
	int igu_vec = 0;

	bp->msix_table[0].entry = igu_vec;
	DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);

#ifdef BCM_CNIC
	igu_vec = BP_L_ID(bp) + offset;
	bp->msix_table[1].entry = igu_vec;
	DP(NETIF_MSG_IFUP, "msix_table[1].entry = %d (CNIC)\n", igu_vec);
	offset++;
#endif
	for_each_queue(bp, i) {
		igu_vec = BP_L_ID(bp) + offset + i;
		bp->msix_table[i + offset].entry = igu_vec;
		DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
		   "(fastpath #%u)\n", i + offset, igu_vec, i);
	}

	rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
			     BNX2X_NUM_QUEUES(bp) + offset);

	/*
	 * reconfigure number of tx/rx queues according to available
	 * MSI-X vectors
	 */
	if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
		/* vectors available for FP */
		int fp_vec = rc - BNX2X_MSIX_VEC_FP_START;

		DP(NETIF_MSG_IFUP,
		   "Trying to use less MSI-X vectors: %d\n", rc);
		rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
		if (rc) {
			DP(NETIF_MSG_IFUP,
			   "MSI-X is not attainable  rc %d\n", rc);
			return rc;
		}
		bp->num_queues = min(bp->num_queues, fp_vec);
		DP(NETIF_MSG_IFUP, "New queue configuration set: %d\n",
		   bp->num_queues);
	} else if (rc) {
		DP(NETIF_MSG_IFUP, "MSI-X is not attainable  rc %d\n", rc);
		return rc;
	}

	bp->flags |= USING_MSIX_FLAG;

	return 0;
}
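/* The resulting msix_table layout, assuming the CNIC entry is
 * compiled in (in which case offset ends up as 2, otherwise 1):
 *
 *	msix_table[0]		slowpath (default SB)
 *	msix_table[1]		CNIC
 *	msix_table[offset + i]	fastpath queue i
 *
 * bnx2x_req_msix_irqs() below requests vectors in the same order.
 */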
static int bnx2x_req_msix_irqs(struct bnx2x *bp)
{
	int i, rc, offset = 1;

	rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
			 bp->dev->name, bp->dev);
	if (rc) {
		BNX2X_ERR("request sp irq failed\n");
		return -EBUSY;
	}
#ifdef BCM_CNIC
	offset++;
#endif
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
			 bp->dev->name, i);

		rc = request_irq(bp->msix_table[i + offset].vector,
				 bnx2x_msix_fp_int, 0, fp->name, fp);
		if (rc) {
			BNX2X_ERR("request fp #%d irq failed  rc %d\n", i, rc);
			bnx2x_free_msix_irqs(bp);
			return -EBUSY;
		}
		fp->state = BNX2X_FP_STATE_IRQ;
	}

	i = BNX2X_NUM_QUEUES(bp);
	netdev_info(bp->dev, "using MSI-X  IRQs: sp %d  fp[%d] %d"
	       " ... fp[%d] %d\n",
	       bp->msix_table[0].vector,
	       0, bp->msix_table[offset].vector,
	       i - 1, bp->msix_table[offset + i - 1].vector);

	return 0;
}

static int bnx2x_enable_msi(struct bnx2x *bp)
{
	int rc;

	rc = pci_enable_msi(bp->pdev);
	if (rc) {
		DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
		return -1;
	}
	bp->flags |= USING_MSI_FLAG;

	return 0;
}
static int bnx2x_req_irq(struct bnx2x *bp)
{
	unsigned long flags;
	int rc;

	if (bp->flags & USING_MSI_FLAG)
		flags = 0;
	else
		flags = IRQF_SHARED;

	rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
			 bp->dev->name, bp->dev);
	if (!rc)
		bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;

	return rc;
}

static void bnx2x_napi_enable(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i)
		napi_enable(&bnx2x_fp(bp, i, napi));
}

static void bnx2x_napi_disable(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i)
		napi_disable(&bnx2x_fp(bp, i, napi));
}

void bnx2x_netif_start(struct bnx2x *bp)
{
	int intr_sem;

	intr_sem = atomic_dec_and_test(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	if (intr_sem) {
		if (netif_running(bp->dev)) {
			bnx2x_napi_enable(bp);
			bnx2x_int_enable(bp);
			if (bp->state == BNX2X_STATE_OPEN)
				netif_tx_wake_all_queues(bp->dev);
		}
	}
}

void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
{
	bnx2x_int_disable_sync(bp, disable_hw);
	bnx2x_napi_disable(bp);
	netif_tx_disable(bp->dev);
}
static int bnx2x_set_num_queues(struct bnx2x *bp)
{
	int rc = 0;

	switch (bp->int_mode) {
	case INT_MODE_INTx:
	case INT_MODE_MSI:
		bp->num_queues = 1;
		DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
		break;
	default:
		/* Set number of queues according to bp->multi_mode value */
		bnx2x_set_num_queues_msix(bp);

		DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
		   bp->num_queues);

		/* if we can't use MSI-X we only need one fp,
		 * so try to enable MSI-X with the requested number of fp's
		 * and fallback to MSI or legacy INTx with one fp
		 */
		rc = bnx2x_enable_msix(bp);
		if (rc)
			/* failed to enable MSI-X */
			bp->num_queues = 1;
		break;
	}
	bp->dev->real_num_tx_queues = bp->num_queues;
	return rc;
}

static void bnx2x_release_firmware(struct bnx2x *bp)
{
	kfree(bp->init_ops_offsets);
	kfree(bp->init_ops);
	kfree(bp->init_data);
	release_firmware(bp->firmware);
}
/* must be called with rtnl_lock */
int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
{
	u32 load_code;
	int i, rc;

	/* Set init arrays */
	rc = bnx2x_init_firmware(bp);
	if (rc) {
		BNX2X_ERR("Error loading firmware\n");
		return rc;
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EPERM;
#endif

	bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;

	rc = bnx2x_set_num_queues(bp);

	if (bnx2x_alloc_mem(bp)) {
		bnx2x_free_irq(bp, true);
		return -ENOMEM;
	}

	for_each_queue(bp, i)
		bnx2x_fp(bp, i, disable_tpa) =
			((bp->flags & TPA_ENABLE_FLAG) == 0);

	for_each_queue(bp, i)
		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
			       bnx2x_poll, 128);

	bnx2x_napi_enable(bp);

	if (bp->flags & USING_MSIX_FLAG) {
		rc = bnx2x_req_msix_irqs(bp);
		if (rc) {
			bnx2x_free_irq(bp, true);
			goto load_error1;
		}
	} else {
		/* Fall to INTx if failed to enable MSI-X due to lack of
		 * memory (in bnx2x_set_num_queues())
		 */
		if ((rc != -ENOMEM) && (bp->int_mode != INT_MODE_INTx))
			bnx2x_enable_msi(bp);
		rc = bnx2x_req_irq(bp);
		if (rc) {
			BNX2X_ERR("IRQ request failed  rc %d, aborting\n", rc);
			bnx2x_free_irq(bp, true);
			goto load_error1;
		}
		if (bp->flags & USING_MSI_FLAG) {
			bp->dev->irq = bp->pdev->irq;
			netdev_info(bp->dev, "using MSI  IRQ %d\n",
				    bp->pdev->irq);
		}
	}

	/* Send LOAD_REQUEST command to MCP
	 * Returns the type of LOAD command:
	 * if it is the first port to be initialized
	 * common blocks should be initialized, otherwise - not
	 */
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			goto load_error2;
		}
		if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
			rc = -EBUSY; /* other port in diagnostic mode */
			goto load_error2;
		}

	} else {
		int port = BP_PORT(bp);

		DP(NETIF_MSG_IFUP, "NO MCP - load counts      %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		load_count[0]++;
		load_count[1 + port]++;
		DP(NETIF_MSG_IFUP, "NO MCP - new load counts  %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		if (load_count[0] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
		else if (load_count[1 + port] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_PORT;
		else
			load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
	}

	if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
	    (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
		bp->port.pmf = 1;

	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* Initialize HW */
	rc = bnx2x_init_hw(bp, load_code);
	if (rc) {
		BNX2X_ERR("HW init failed, aborting\n");
		bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
		goto load_error2;
	}

	/* Setup NIC internals and enable interrupts */
	bnx2x_nic_init(bp, load_code);

	if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) &&
	    (bp->common.shmem2_base))
		SHMEM2_WR(bp, dcc_support,
			  (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
			   SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));

	/* Send LOAD_DONE command to MCP */
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			goto load_error3;
		}
	}

	bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;

	rc = bnx2x_setup_leading(bp);
	if (rc) {
		BNX2X_ERR("Setup leading failed!\n");
#ifndef BNX2X_STOP_ON_ERROR
		goto load_error3;
#else
		bp->panic = 1;
		return -EBUSY;
#endif
	}

	if (CHIP_IS_E1H(bp))
		if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
			DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
			bp->flags |= MF_FUNC_DIS;
		}

	if (bp->state == BNX2X_STATE_OPEN) {
#ifdef BCM_CNIC
		/* Enable Timer scan */
		REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
#endif
		for_each_nondefault_queue(bp, i) {
			rc = bnx2x_setup_multi(bp, i);
			if (rc)
#ifdef BCM_CNIC
				goto load_error4;
#else
				goto load_error3;
#endif
		}

		if (CHIP_IS_E1(bp))
			bnx2x_set_eth_mac_addr_e1(bp, 1);
		else
			bnx2x_set_eth_mac_addr_e1h(bp, 1);
#ifdef BCM_CNIC
		/* Set iSCSI L2 MAC */
		mutex_lock(&bp->cnic_mutex);
		if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD) {
			bnx2x_set_iscsi_eth_mac_addr(bp, 1);
			bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
			bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping,
				      CNIC_SB_ID(bp));
		}
		mutex_unlock(&bp->cnic_mutex);
#endif
	}

	if (bp->port.pmf)
		bnx2x_initial_phy_init(bp, load_mode);

	/* Start fast path */
	switch (load_mode) {
	case LOAD_NORMAL:
		if (bp->state == BNX2X_STATE_OPEN) {
			/* Tx queue should be only re-enabled */
			netif_tx_wake_all_queues(bp->dev);
		}
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		break;

	case LOAD_OPEN:
		netif_tx_start_all_queues(bp->dev);
		if (bp->state != BNX2X_STATE_OPEN)
			netif_tx_disable(bp->dev);
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		break;

	case LOAD_DIAG:
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		bp->state = BNX2X_STATE_DIAG;
		break;

	default:
		break;
	}

	if (!bp->port.pmf)
		bnx2x__link_status_update(bp);

	/* start the timer */
	mod_timer(&bp->timer, jiffies + bp->current_interval);

#ifdef BCM_CNIC
	bnx2x_setup_cnic_irq_info(bp);
	if (bp->state == BNX2X_STATE_OPEN)
		bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
#endif
	bnx2x_inc_load_cnt(bp);

	bnx2x_release_firmware(bp);

	return 0;

#ifdef BCM_CNIC
load_error4:
	/* Disable Timer scan */
	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0);
#endif
load_error3:
	bnx2x_int_disable_sync(bp, 1);
	if (!BP_NOMCP(bp)) {
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
	}
	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
load_error2:
	/* Release IRQs */
	bnx2x_free_irq(bp, false);
load_error1:
	bnx2x_napi_disable(bp);
	for_each_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bnx2x_release_firmware(bp);

	return rc;
}
/* must be called with rtnl_lock */
int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
{
	int i;

	if (bp->state == BNX2X_STATE_CLOSED) {
		/* Interface has been removed - nothing to recover */
		bp->recovery_state = BNX2X_RECOVERY_DONE;
		bp->is_leader = 0;
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
		smp_wmb();

		return -EINVAL;
	}

#ifdef BCM_CNIC
	bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
#endif
	bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;

	/* Set "drop all" */
	bp->rx_mode = BNX2X_RX_MODE_NONE;
	bnx2x_set_storm_rx_mode(bp);

	/* Disable HW interrupts, NAPI and Tx */
	bnx2x_netif_stop(bp, 1);
	netif_carrier_off(bp->dev);

	del_timer_sync(&bp->timer);
	SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
		 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	/* Release IRQs */
	bnx2x_free_irq(bp, false);

	/* Cleanup the chip if needed */
	if (unload_mode != UNLOAD_RECOVERY)
		bnx2x_chip_cleanup(bp, unload_mode);

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));

	bp->state = BNX2X_STATE_CLOSED;

	/* The last driver must disable a "close the gate" if there is no
	 * parity attention or "process kill" pending.
	 */
	if ((!bnx2x_dec_load_cnt(bp)) && (!bnx2x_chk_parity_attn(bp)) &&
	    bnx2x_reset_is_done(bp))
		bnx2x_disable_close_the_gate(bp);

	/* Reset MCP mailbox sequence if there is ongoing recovery */
	if (unload_mode == UNLOAD_RECOVERY)
		bp->fw_seq = 0;

	return 0;
}
int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0:
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
				       PCI_PM_CTRL_PME_STATUS));

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);
		break;

	case PCI_D3hot:
		/* If there are other clients above, don't
		 * shut down the power
		 */
		if (atomic_read(&bp->pdev->enable_cnt) != 1)
			return 0;
		/* Don't shut down the power for emulation and FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			return 0;

		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= 3;

		if (bp->wol)
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;

		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		break;

	default:
		return -EINVAL;
	}
	return 0;
}
/*
 * net_device service functions
 */
static int bnx2x_poll(struct napi_struct *napi, int budget)
{
	int work_done = 0;
	struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
						 napi);
	struct bnx2x *bp = fp->bp;

	while (1) {
#ifdef BNX2X_STOP_ON_ERROR
		if (unlikely(bp->panic)) {
			napi_complete(napi);
			return 0;
		}
#endif

		if (bnx2x_has_tx_work(fp))
			bnx2x_tx_int(fp);

		if (bnx2x_has_rx_work(fp)) {
			work_done += bnx2x_rx_int(fp, budget - work_done);

			/* must not complete if we consumed full budget */
			if (work_done >= budget)
				break;
		}

		/* Fall out from the NAPI loop if needed */
		if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
			bnx2x_update_fpsb_idx(fp);
			/* bnx2x_has_rx_work() reads the status block, thus we
			 * need to ensure that status block indices have been
			 * actually read (bnx2x_update_fpsb_idx) prior to this
			 * check (bnx2x_has_rx_work) so that we won't write the
			 * "newer" value of the status block to IGU (if there
			 * was a DMA right after bnx2x_has_rx_work and if there
			 * is no rmb, the memory reading (bnx2x_update_fpsb_idx)
			 * may be postponed to right before bnx2x_ack_sb). In
			 * this case there will never be another interrupt
			 * until there is another update of the status block,
			 * while there is still unhandled work.
			 */
			rmb();

			if (!(bnx2x_has_rx_work(fp) ||
			      bnx2x_has_tx_work(fp))) {
				napi_complete(napi);
				/* Re-enable interrupts */
				bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
					     le16_to_cpu(fp->fp_c_idx),
					     IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
					     le16_to_cpu(fp->fp_u_idx),
					     IGU_INT_ENABLE, 1);
				break;
			}
		}
	}

	return work_done;
}
/* we split the first BD into headers and data BDs
 * to ease the pain of our fellow microcode engineers
 * we use one mapping for both BDs
 * So far this has only been observed to happen
 * in Other Operating Systems(TM)
 */
static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
				   struct bnx2x_fastpath *fp,
				   struct sw_tx_bd *tx_buf,
				   struct eth_tx_start_bd **tx_bd, u16 hlen,
				   u16 bd_prod, int nbd)
{
	struct eth_tx_start_bd *h_tx_bd = *tx_bd;
	struct eth_tx_bd *d_tx_bd;
	dma_addr_t mapping;
	int old_len = le16_to_cpu(h_tx_bd->nbytes);

	/* first fix first BD */
	h_tx_bd->nbd = cpu_to_le16(nbd);
	h_tx_bd->nbytes = cpu_to_le16(hlen);

	DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
	   "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
	   h_tx_bd->addr_lo, h_tx_bd->nbd);

	/* now get a new data BD
	 * (after the pbd) and fill it
	 */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;

	mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
			   le32_to_cpu(h_tx_bd->addr_lo)) + hlen;

	d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);

	/* this marks the BD as one that has no individual mapping */
	tx_buf->flags |= BNX2X_TSO_SPLIT_BD;

	DP(NETIF_MSG_TX_QUEUED,
	   "TSO split data size is %d (%x:%x)\n",
	   d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);

	/* update tx_bd */
	*tx_bd = (struct eth_tx_start_bd *)d_tx_bd;

	return bd_prod;
}
static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
{
	if (fix > 0)
		csum = (u16) ~csum_fold(csum_sub(csum,
				csum_partial(t_header - fix, fix, 0)));

	else if (fix < 0)
		csum = (u16) ~csum_fold(csum_add(csum,
				csum_partial(t_header, -fix, 0)));

	return swab16(csum);
}
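/* Worked through, assuming fix > 0: the HW began checksumming 'fix'
 * bytes before the transport header, so csum_partial(t_header - fix,
 * fix, 0) recomputes exactly that excess and csum_sub() removes it;
 * for fix < 0 the missed bytes are csum_add()ed back in. The final
 * swab16() matches the byte order used for the other pseudo-checksum
 * writes into the parse BD elsewhere in this file.
 */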
static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
{
	u32 rc;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		rc = XMIT_PLAIN;
	else {
		if (skb->protocol == htons(ETH_P_IPV6)) {
			rc = XMIT_CSUM_V6;
			if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
				rc |= XMIT_CSUM_TCP;
		} else {
			rc = XMIT_CSUM_V4;
			if (ip_hdr(skb)->protocol == IPPROTO_TCP)
				rc |= XMIT_CSUM_TCP;
		}
	}

	if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
		rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP);
	else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
		rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6);

	return rc;
}
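/* The flag combinations this can return, as relied on below (the
 * XMIT_* definitions live outside this file):
 *
 *	!CHECKSUM_PARTIAL	XMIT_PLAIN
 *	IPv4 csum		XMIT_CSUM_V4 [| XMIT_CSUM_TCP for TCP]
 *	IPv6 csum		XMIT_CSUM_V6 [| XMIT_CSUM_TCP for TCP]
 *	TSO v4/v6		additionally XMIT_GSO_V4 / XMIT_GSO_V6
 */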
#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
/* check if packet requires linearization (packet is too fragmented)
 * no need to check fragmentation if page size > 8K (there will be no
 * violation to FW restrictions)
 */
static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
			     u32 xmit_type)
{
	int to_copy = 0;
	int hlen = 0;
	int first_bd_sz = 0;

	/* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
	if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {

		if (xmit_type & XMIT_GSO) {
			unsigned short lso_mss = skb_shinfo(skb)->gso_size;
			/* Check if LSO packet needs to be copied:
			 * 3 = 1 (for headers BD) + 2 (for PBD and last BD)
			 */
			int wnd_size = MAX_FETCH_BD - 3;
			/* Number of windows to check */
			int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
			int wnd_idx = 0;
			int frag_idx = 0;
			u32 wnd_sum = 0;

			/* Headers length */
			hlen = (int)(skb_transport_header(skb) - skb->data) +
				tcp_hdrlen(skb);

			/* Amount of data (w/o headers) on linear part of SKB*/
			first_bd_sz = skb_headlen(skb) - hlen;

			wnd_sum = first_bd_sz;

			/* Calculate the first sum - it's special */
			for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
				wnd_sum +=
					skb_shinfo(skb)->frags[frag_idx].size;

			/* If there was data on linear skb data - check it */
			if (first_bd_sz > 0) {
				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					goto exit_lbl;
				}

				wnd_sum -= first_bd_sz;
			}

			/* Others are easier: run through the frag list and
			 * check all windows
			 */
			for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
				wnd_sum +=
			  skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;

				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					break;
				}
				wnd_sum -=
					skb_shinfo(skb)->frags[wnd_idx].size;
			}
		} else {
			/* in non-LSO case, a too fragmented packet should
			 * always be linearized
			 */
			to_copy = 1;
		}
	}

exit_lbl:
	if (unlikely(to_copy))
		DP(NETIF_MSG_TX_QUEUED,
		   "Linearization IS REQUIRED for %s packet. "
		   "num_frags %d  hlen %d  first_bd_sz %d\n",
		   (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
		   skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);

	return to_copy;
}
#endif
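/* A worked example of the sliding-window check, assuming
 * MAX_FETCH_BD == 13 (so wnd_size == 10 - the constant is defined
 * outside this file) and lso_mss == 1400: the FW restriction being
 * enforced is that any wnd_size consecutive BDs must carry at least
 * one full MSS of payload. wnd_sum starts as first_bd_sz plus the
 * first wnd_size - 1 frag sizes; each iteration slides the window
 * one frag forward by adding the entering frag and subtracting the
 * leaving one. Any window summing below 1400 forces skb_linearize().
 */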
/* called with netif_tx_lock
 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue()
 */
netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct bnx2x_fastpath *fp;
	struct netdev_queue *txq;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
	struct eth_tx_parse_bd *pbd = NULL;
	u16 pkt_prod, bd_prod;
	int nbd, fp_index;
	dma_addr_t mapping;
	u32 xmit_type = bnx2x_xmit_type(bp, skb);
	int i;
	u8 hlen = 0;
	__le16 pkt_size = 0;
	struct ethhdr *eth;
	u8 mac_type = UNICAST_ADDRESS;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return NETDEV_TX_BUSY;
#endif

	fp_index = skb_get_queue_mapping(skb);
	txq = netdev_get_tx_queue(dev, fp_index);

	fp = &bp->fp[fp_index];

	if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
		fp->eth_q_stats.driver_xoff++;
		netif_tx_stop_queue(txq);
		BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
		return NETDEV_TX_BUSY;
	}

	DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x  protocol %x  protocol(%x,%x)"
	   "  gso type %x  xmit_type %x\n",
	   skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
	   ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);

	eth = (struct ethhdr *)skb->data;

	/* set flag according to packet type (UNICAST_ADDRESS is default) */
	if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
		if (is_broadcast_ether_addr(eth->h_dest))
			mac_type = BROADCAST_ADDRESS;
		else
			mac_type = MULTICAST_ADDRESS;
	}

#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
	/* First, check if we need to linearize the skb (due to FW
	 * restrictions). No need to check fragmentation if page size > 8K
	 * (there will be no violation to FW restrictions)
	 */
	if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
		/* Statistics of linearization */
		bp->lin_cnt++;
		if (skb_linearize(skb) != 0) {
			DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
			   "silently dropping this SKB\n");
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
	}
#endif

	/*
	 * Please read carefully. First we use one BD which we mark as start,
	 * then we have a parsing info BD (used for TSO or xsum),
	 * and only then we have the rest of the TSO BDs.
	 * (don't forget to mark the last one as last,
	 * and to unmap only AFTER you write to the BD ...)
	 * And above all, all pbd sizes are in words - NOT DWORDS!
	 */

	pkt_prod = fp->tx_pkt_prod++;
	bd_prod = TX_BD(fp->tx_bd_prod);

	/* get a tx_buf and first BD */
	tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
	tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;

	tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
	tx_start_bd->general_data = (mac_type <<
				     ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
	/* header nbd */
	tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);

	/* remember the first BD of the packet */
	tx_buf->first_bd = fp->tx_bd_prod;
	tx_buf->skb = skb;
	tx_buf->flags = 0;

	DP(NETIF_MSG_TX_QUEUED,
	   "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
	   pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);

#ifdef BCM_VLAN
	if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
	    (bp->flags & HW_VLAN_TX_FLAG)) {
		tx_start_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
	} else
#endif
		tx_start_bd->vlan = cpu_to_le16(pkt_prod);

	/* turn on parsing and get a BD */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	pbd = &fp->tx_desc_ring[bd_prod].parse_bd;

	memset(pbd, 0, sizeof(struct eth_tx_parse_bd));

	if (xmit_type & XMIT_CSUM) {
		hlen = (skb_network_header(skb) - skb->data) / 2;

		/* for now NS flag is not used in Linux */
		pbd->global_data =
			(hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
				 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));

		pbd->ip_hlen = (skb_transport_header(skb) -
				skb_network_header(skb)) / 2;

		hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;

		pbd->total_hlen = cpu_to_le16(hlen);
		hlen = hlen*2;

		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;

		if (xmit_type & XMIT_CSUM_V4)
			tx_start_bd->bd_flags.as_bitfield |=
						ETH_TX_BD_FLAGS_IP_CSUM;
		else
			tx_start_bd->bd_flags.as_bitfield |=
						ETH_TX_BD_FLAGS_IPV6;

		if (xmit_type & XMIT_CSUM_TCP) {
			pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);

		} else {
			s8 fix = SKB_CS_OFF(skb); /* signed! */

			pbd->global_data |= ETH_TX_PARSE_BD_UDP_CS_FLG;

			DP(NETIF_MSG_TX_QUEUED,
			   "hlen %d  fix %d  csum before fix %x\n",
			   le16_to_cpu(pbd->total_hlen), fix, SKB_CS(skb));

			/* HW bug: fixup the CSUM */
			pbd->tcp_pseudo_csum =
				bnx2x_csum_fix(skb_transport_header(skb),
					       SKB_CS(skb), fix);

			DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
			   pbd->tcp_pseudo_csum);
		}
	}

	mapping = dma_map_single(&bp->pdev->dev, skb->data,
				 skb_headlen(skb), DMA_TO_DEVICE);

	tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
	tx_start_bd->nbd = cpu_to_le16(nbd);
	tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
	pkt_size = tx_start_bd->nbytes;

	DP(NETIF_MSG_TX_QUEUED, "first bd @%p  addr (%x:%x)  nbd %d"
	   "  nbytes %d  flags %x  vlan %x\n",
	   tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
	   le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
	   tx_start_bd->bd_flags.as_bitfield, le16_to_cpu(tx_start_bd->vlan));

	if (xmit_type & XMIT_GSO) {

		DP(NETIF_MSG_TX_QUEUED,
		   "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
		   skb->len, hlen, skb_headlen(skb),
		   skb_shinfo(skb)->gso_size);

		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;

		if (unlikely(skb_headlen(skb) > hlen))
			bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
						 hlen, bd_prod, ++nbd);

		pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
		pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
		pbd->tcp_flags = pbd_tcp_flags(skb);

		if (xmit_type & XMIT_GSO_V4) {
			pbd->ip_id = swab16(ip_hdr(skb)->id);
			pbd->tcp_pseudo_csum =
				swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
							  ip_hdr(skb)->daddr,
							  0, IPPROTO_TCP, 0));

		} else
			pbd->tcp_pseudo_csum =
				swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
							&ipv6_hdr(skb)->daddr,
							0, IPPROTO_TCP, 0));

		pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
	}
	tx_data_bd = (struct eth_tx_bd *)tx_start_bd;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
		tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
		if (total_pkt_bd == NULL)
			total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;

		mapping = dma_map_page(&bp->pdev->dev, frag->page,
				       frag->page_offset,
				       frag->size, DMA_TO_DEVICE);

		tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
		tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
		tx_data_bd->nbytes = cpu_to_le16(frag->size);
		le16_add_cpu(&pkt_size, frag->size);

		DP(NETIF_MSG_TX_QUEUED,
		   "frag %d  bd @%p  addr (%x:%x)  nbytes %d\n",
		   i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
		   le16_to_cpu(tx_data_bd->nbytes));
	}

	DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);

	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));

	/* now send a tx doorbell, counting the next BD
	 * if the packet contains or ends with it
	 */
	if (TX_BD_POFF(bd_prod) < nbd)
		nbd++;

	if (total_pkt_bd != NULL)
		total_pkt_bd->total_pkt_bytes = pkt_size;

	if (pbd)
		DP(NETIF_MSG_TX_QUEUED,
		   "PBD @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u"
		   "  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
		   pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
		   pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
		   pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));

	DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);

	/*
	 * Make sure that the BD data is updated before updating the producer
	 * since FW might read the BD right after the producer is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since FW
	 * assumes packets must have BDs.
	 */
	wmb();

	fp->tx_db.data.prod += nbd;
	barrier();
	DOORBELL(bp, fp->index, fp->tx_db.raw);

	mmiowb();

	fp->tx_bd_prod += nbd;

	if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
		netif_tx_stop_queue(txq);

		/* paired memory barrier is in bnx2x_tx_int(), we have to keep
		 * ordering of set_bit() in netif_tx_stop_queue() and read of
		 * fp->bd_tx_cons
		 */
		smp_mb();

		fp->eth_q_stats.driver_xoff++;
		if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
			netif_tx_wake_queue(txq);
	}
	fp->tx_pkt++;

	return NETDEV_TX_OK;
}
/* called with rtnl_lock */
int bnx2x_change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct bnx2x *bp = netdev_priv(dev);

	if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	if (netif_running(dev)) {
		if (CHIP_IS_E1(bp))
			bnx2x_set_eth_mac_addr_e1(bp, 1);
		else
			bnx2x_set_eth_mac_addr_e1h(bp, 1);
	}

	return 0;
}
/* called with rtnl_lock */
int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		printk(KERN_ERR "Handling parity error recovery. Try again later\n");
		return -EAGAIN;
	}

	if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
	    ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
		return -EINVAL;

	/* This does not race with packet allocation
	 * because the actual alloc size is
	 * only updated as part of load
	 */
	dev->mtu = new_mtu;

	if (netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}
void bnx2x_tx_timeout(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

#ifdef BNX2X_STOP_ON_ERROR
	if (!bp->panic)
		bnx2x_panic();
#endif
	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_delayed_work(&bp->reset_task, 0);
}
#ifdef BCM_VLAN
/* called with rtnl_lock */
void bnx2x_vlan_rx_register(struct net_device *dev,
			    struct vlan_group *vlgrp)
{
	struct bnx2x *bp = netdev_priv(dev);

	bp->vlgrp = vlgrp;

	/* Set flags according to the required capabilities */
	bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);

	if (dev->features & NETIF_F_HW_VLAN_TX)
		bp->flags |= HW_VLAN_TX_FLAG;

	if (dev->features & NETIF_F_HW_VLAN_RX)
		bp->flags |= HW_VLAN_RX_FLAG;

	if (netif_running(dev))
		bnx2x_set_client_config(bp);
}
#endif
int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	pci_save_state(pdev);

	if (!netif_running(dev))
		return 0;

	netif_device_detach(dev);

	bnx2x_nic_unload(bp, UNLOAD_CLOSE);

	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

	return 0;
}

int bnx2x_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;
	int rc;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		printk(KERN_ERR "Handling parity error recovery. Try again later\n");
		return -EAGAIN;
	}

	pci_restore_state(pdev);

	if (!netif_running(dev))
		return 0;

	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);

	rc = bnx2x_nic_load(bp, LOAD_OPEN);

	return rc;
}