1 /* bnx2x_cmn.c: Broadcom Everest network driver.
3 * Copyright (c) 2007-2010 Broadcom Corporation
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath and fastpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
19 #include <linux/etherdevice.h>
21 #include <linux/ipv6.h>
22 #include <net/ip6_checksum.h>
23 #include <linux/firmware.h>
24 #include "bnx2x_cmn.h"
27 #include <linux/if_vlan.h>
30 static int bnx2x_poll(struct napi_struct *napi, int budget);
32 /* free skb in the packet ring at pos idx
33 * return idx of last bd freed
35 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
38 struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
39 struct eth_tx_start_bd *tx_start_bd;
40 struct eth_tx_bd *tx_data_bd;
41 struct sk_buff *skb = tx_buf->skb;
42 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
45 /* prefetch skb end pointer to speed up dev_kfree_skb() */
48 DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
52 DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
53 tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
54 dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
55 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);
57 nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
58 #ifdef BNX2X_STOP_ON_ERROR
59 if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
60 BNX2X_ERR("BAD nbd!\n");
64 new_cons = nbd + tx_buf->first_bd;
67 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
69 /* Skip a parse bd... */
71 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
73 /* ...and the TSO split header bd since they have no mapping */
74 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
76 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
82 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
83 tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
84 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
85 BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
87 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
99 int bnx2x_tx_int(struct bnx2x_fastpath *fp)
101 struct bnx2x *bp = fp->bp;
102 struct netdev_queue *txq;
103 u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
105 #ifdef BNX2X_STOP_ON_ERROR
106 if (unlikely(bp->panic))
110 txq = netdev_get_tx_queue(bp->dev, fp->index);
111 hw_cons = le16_to_cpu(*fp->tx_cons_sb);
112 sw_cons = fp->tx_pkt_cons;
114 while (sw_cons != hw_cons) {
117 pkt_cons = TX_BD(sw_cons);
119 /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */
121 DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
122 hw_cons, sw_cons, pkt_cons);
124 /* if (NEXT_TX_IDX(sw_cons) != hw_cons) {
126 prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
129 bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
133 fp->tx_pkt_cons = sw_cons;
134 fp->tx_bd_cons = bd_cons;
136 /* Need to make the tx_bd_cons update visible to start_xmit()
137 * before checking for netif_tx_queue_stopped(). Without the
138 * memory barrier, there is a small possibility that
139 * start_xmit() will miss it and cause the queue to be stopped
144 /* TBD need a thresh? */
145 if (unlikely(netif_tx_queue_stopped(txq))) {
146 /* Taking tx_lock() is needed to prevent reenabling the queue
147 * while it's empty. This could have happened if rx_action() gets
148 * suspended in bnx2x_tx_int() after the condition before
149 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
151 * stops the queue->sees fresh tx_bd_cons->releases the queue->
152 * sends some packets consuming the whole queue again->
156 __netif_tx_lock(txq, smp_processor_id());
158 if ((netif_tx_queue_stopped(txq)) &&
159 (bp->state == BNX2X_STATE_OPEN) &&
160 (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
161 netif_tx_wake_queue(txq);
163 __netif_tx_unlock(txq);
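/* Track the highest SGE index consumed so far; SUB_S16() does a signed
 * 16-bit subtraction so the comparison stays correct when the index wraps.
 */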
168 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
171 u16 last_max = fp->last_max_sge;
173 if (SUB_S16(idx, last_max) > 0)
174 fp->last_max_sge = idx;
177 static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
178 struct eth_fast_path_rx_cqe *fp_cqe)
180 struct bnx2x *bp = fp->bp;
181 u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
182 le16_to_cpu(fp_cqe->len_on_bd)) >>
184 u16 last_max, last_elem, first_elem;
191 /* First mark all used pages */
192 for (i = 0; i < sge_len; i++)
193 SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));
195 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
196 sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
198 /* Here we assume that the last SGE index is the biggest */
199 prefetch((void *)(fp->sge_mask));
200 bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
202 last_max = RX_SGE(fp->last_max_sge);
203 last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
204 first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;
206 /* If ring is not full */
207 if (last_elem + 1 != first_elem)
210 /* Now update the prod */
211 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
212 if (likely(fp->sge_mask[i]))
215 fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
216 delta += RX_SGE_MASK_ELEM_SZ;
220 fp->rx_sge_prod += delta;
221 /* clear page-end entries */
222 bnx2x_clear_sge_mask_next_elems(fp);
225 DP(NETIF_MSG_RX_STATUS,
226 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
227 fp->last_max_sge, fp->rx_sge_prod);
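/* Open a TPA aggregation: an empty skb from the per-queue bin is mapped and
 * placed on the producer BD (so the ring stays full), while the skb that was
 * on the consumer BD is parked in the bin until the aggregation is closed by
 * bnx2x_tpa_stop().
 */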
230 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
231 struct sk_buff *skb, u16 cons, u16 prod)
233 struct bnx2x *bp = fp->bp;
234 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
235 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
236 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
239 /* move empty skb from pool to prod and map it */
240 prod_rx_buf->skb = fp->tpa_pool[queue].skb;
241 mapping = dma_map_single(&bp->pdev->dev, fp->tpa_pool[queue].skb->data,
242 bp->rx_buf_size, DMA_FROM_DEVICE);
243 dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
245 /* move partial skb from cons to pool (don't unmap yet) */
246 fp->tpa_pool[queue] = *cons_rx_buf;
248 /* mark bin state as start - print error if current state != stop */
249 if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
250 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
252 fp->tpa_state[queue] = BNX2X_TPA_START;
254 /* point prod_bd to new skb */
255 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
256 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
258 #ifdef BNX2X_STOP_ON_ERROR
259 fp->tpa_queue_used |= (1 << queue);
260 #ifdef _ASM_GENERIC_INT_L64_H
261 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
263 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
269 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
271 struct eth_fast_path_rx_cqe *fp_cqe,
274 struct sw_rx_page *rx_pg, old_rx_pg;
275 u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
276 u32 i, frag_len, frag_size, pages;
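/* frag_size is the part of the packet that arrived on the SGE pages
 * (everything beyond what fit on the BD); pages is that length rounded up
 * to whole SGE pages.
 */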
280 frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
281 pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
283 /* This is needed in order to enable forwarding support */
285 skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
286 max(frag_size, (u32)len_on_bd));
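/* (the stack uses gso_size to re-segment the aggregated frame in case it
 *  has to be forwarded rather than consumed locally)
 */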
288 #ifdef BNX2X_STOP_ON_ERROR
289 if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
290 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
292 BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
293 fp_cqe->pkt_len, len_on_bd);
299 /* Run through the SGL and compose the fragmented skb */
300 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
301 u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));
303 /* FW gives the indices of the SGE as if the ring is an array
304 (meaning that "next" element will consume 2 indices) */
305 frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
306 rx_pg = &fp->rx_page_ring[sge_idx];
309 /* If we fail to allocate a substitute page, we simply stop
310 where we are and drop the whole packet */
311 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
313 fp->eth_q_stats.rx_skb_alloc_failed++;
317 /* Unmap the page as we are going to pass it to the stack */
318 dma_unmap_page(&bp->pdev->dev,
319 dma_unmap_addr(&old_rx_pg, mapping),
320 SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
322 /* Add one frag and update the appropriate fields in the skb */
323 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
325 skb->data_len += frag_len;
326 skb->truesize += frag_len;
327 skb->len += frag_len;
329 frag_size -= frag_len;
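/* Close a TPA aggregation: take the skb parked in the bin, recompute the IP
 * header checksum, attach the SGE pages via bnx2x_fill_frag_skb() and hand
 * the result to the stack; a newly allocated skb refills the bin.
 */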
335 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
336 u16 queue, int pad, int len, union eth_rx_cqe *cqe,
339 struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
340 struct sk_buff *skb = rx_buf->skb;
342 struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
344 /* Unmap skb in the pool anyway, as we are going to change
345 pool entry status to BNX2X_TPA_STOP even if new skb allocation
347 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
348 bp->rx_buf_size, DMA_FROM_DEVICE);
350 if (likely(new_skb)) {
351 /* fix ip xsum and give it to the stack */
352 /* (no need to map the new skb) */
355 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
357 int is_not_hwaccel_vlan_cqe =
358 (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
362 prefetch(((char *)(skb)) + 128);
364 #ifdef BNX2X_STOP_ON_ERROR
365 if (pad + len > bp->rx_buf_size) {
366 BNX2X_ERR("skb_put is about to fail... "
367 "pad %d len %d rx_buf_size %d\n",
368 pad, len, bp->rx_buf_size);
374 skb_reserve(skb, pad);
377 skb->protocol = eth_type_trans(skb, bp->dev);
378 skb->ip_summed = CHECKSUM_UNNECESSARY;
383 iph = (struct iphdr *)skb->data;
385 /* If there is no Rx VLAN offloading -
386 take the VLAN tag into account */
387 if (unlikely(is_not_hwaccel_vlan_cqe))
388 iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
391 iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
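/* (the aggregated frame no longer matches the checksum the sender put in the
 *  first segment's IP header, hence the recalculation above)
 */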
394 if (!bnx2x_fill_frag_skb(bp, fp, skb,
395 &cqe->fast_path_cqe, cqe_idx)) {
397 if ((bp->vlgrp != NULL) && is_vlan_cqe &&
398 (!is_not_hwaccel_vlan_cqe))
399 vlan_gro_receive(&fp->napi, bp->vlgrp,
400 le16_to_cpu(cqe->fast_path_cqe.
404 napi_gro_receive(&fp->napi, skb);
406 DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
407 " - dropping packet!\n");
412 /* put new skb in bin */
413 fp->tpa_pool[queue].skb = new_skb;
416 /* else drop the packet and keep the buffer in the bin */
417 DP(NETIF_MSG_RX_STATUS,
418 "Failed to allocate new skb - dropping packet!\n");
419 fp->eth_q_stats.rx_skb_alloc_failed++;
422 fp->tpa_state[queue] = BNX2X_TPA_STOP;
425 /* Set Toeplitz hash value in the skb using the value from the
426 * CQE (calculated by HW).
428 static inline void bnx2x_set_skb_rxhash(struct bnx2x *bp, union eth_rx_cqe *cqe,
431 /* Set Toeplitz hash from CQE */
432 if ((bp->dev->features & NETIF_F_RXHASH) &&
433 (cqe->fast_path_cqe.status_flags &
434 ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
436 le32_to_cpu(cqe->fast_path_cqe.rss_hash_result);
439 int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
441 struct bnx2x *bp = fp->bp;
442 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
443 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
446 #ifdef BNX2X_STOP_ON_ERROR
447 if (unlikely(bp->panic))
451 /* The CQ "next element" is the same size as a regular element,
452 that's why it's OK here */
453 hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
454 if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
457 bd_cons = fp->rx_bd_cons;
458 bd_prod = fp->rx_bd_prod;
459 bd_prod_fw = bd_prod;
460 sw_comp_cons = fp->rx_comp_cons;
461 sw_comp_prod = fp->rx_comp_prod;
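/* The RX path runs two rings in lockstep: the BD ring holding the skb
 * buffers (bd_cons/bd_prod) and the completion queue written by the chip
 * (sw_comp_cons/sw_comp_prod, compared against hw_comp_cons from the
 * status block).
 */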
463 /* Memory barrier necessary as speculative reads of the rx
464 * buffer can be ahead of the index in the status block
468 DP(NETIF_MSG_RX_STATUS,
469 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
470 fp->index, hw_comp_cons, sw_comp_cons);
472 while (sw_comp_cons != hw_comp_cons) {
473 struct sw_rx_bd *rx_buf = NULL;
475 union eth_rx_cqe *cqe;
479 comp_ring_cons = RCQ_BD(sw_comp_cons);
480 bd_prod = RX_BD(bd_prod);
481 bd_cons = RX_BD(bd_cons);
483 /* Prefetch the page containing the BD descriptor
484 at producer's index. It will be needed when new skb is
486 prefetch((void *)(PAGE_ALIGN((unsigned long)
487 (&fp->rx_desc_ring[bd_prod])) -
490 cqe = &fp->rx_comp_ring[comp_ring_cons];
491 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
493 DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
494 " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
495 cqe_fp_flags, cqe->fast_path_cqe.status_flags,
496 le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
497 le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
498 le16_to_cpu(cqe->fast_path_cqe.pkt_len));
500 /* is this a slowpath msg? */
501 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
502 bnx2x_sp_event(fp, cqe);
505 /* this is an rx packet */
507 rx_buf = &fp->rx_buf_ring[bd_cons];
510 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
511 pad = cqe->fast_path_cqe.placement_offset;
513 /* If CQE is marked both TPA_START and TPA_END
514 it is a non-TPA CQE */
515 if ((!fp->disable_tpa) &&
516 (TPA_TYPE(cqe_fp_flags) !=
517 (TPA_TYPE_START | TPA_TYPE_END))) {
518 u16 queue = cqe->fast_path_cqe.queue_index;
520 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
521 DP(NETIF_MSG_RX_STATUS,
522 "calling tpa_start on queue %d\n",
525 bnx2x_tpa_start(fp, queue, skb,
528 /* Set Toeplitz hash for an LRO skb */
529 bnx2x_set_skb_rxhash(bp, cqe, skb);
534 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
535 DP(NETIF_MSG_RX_STATUS,
536 "calling tpa_stop on queue %d\n",
539 if (!BNX2X_RX_SUM_FIX(cqe))
540 BNX2X_ERR("STOP on non-TCP "
543 /* This is the size of the linear data
545 len = le16_to_cpu(cqe->fast_path_cqe.
547 bnx2x_tpa_stop(bp, fp, queue, pad,
548 len, cqe, comp_ring_cons);
549 #ifdef BNX2X_STOP_ON_ERROR
554 bnx2x_update_sge_prod(fp,
555 &cqe->fast_path_cqe);
560 dma_sync_single_for_device(&bp->pdev->dev,
561 dma_unmap_addr(rx_buf, mapping),
562 pad + RX_COPY_THRESH,
564 prefetch(((char *)(skb)) + 128);
566 /* is this an error packet? */
567 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
569 "ERROR flags %x rx packet %u\n",
570 cqe_fp_flags, sw_comp_cons);
571 fp->eth_q_stats.rx_err_discard_pkt++;
575 /* Since we don't have a jumbo ring
576 * copy small packets if mtu > 1500
578 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
579 (len <= RX_COPY_THRESH)) {
580 struct sk_buff *new_skb;
582 new_skb = netdev_alloc_skb(bp->dev,
584 if (new_skb == NULL) {
586 "ERROR packet dropped "
587 "because of alloc failure\n");
588 fp->eth_q_stats.rx_skb_alloc_failed++;
593 skb_copy_from_linear_data_offset(skb, pad,
594 new_skb->data + pad, len);
595 skb_reserve(new_skb, pad);
596 skb_put(new_skb, len);
598 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
603 if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
604 dma_unmap_single(&bp->pdev->dev,
605 dma_unmap_addr(rx_buf, mapping),
608 skb_reserve(skb, pad);
613 "ERROR packet dropped because "
614 "of alloc failure\n");
615 fp->eth_q_stats.rx_skb_alloc_failed++;
617 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
621 skb->protocol = eth_type_trans(skb, bp->dev);
623 /* Set Toeplitz hash for a non-LRO skb */
624 bnx2x_set_skb_rxhash(bp, cqe, skb);
626 skb_checksum_none_assert(skb);
628 if (likely(BNX2X_RX_CSUM_OK(cqe)))
629 skb->ip_summed = CHECKSUM_UNNECESSARY;
631 fp->eth_q_stats.hw_csum_err++;
635 skb_record_rx_queue(skb, fp->index);
638 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
639 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
641 vlan_gro_receive(&fp->napi, bp->vlgrp,
642 le16_to_cpu(cqe->fast_path_cqe.vlan_tag), skb);
645 napi_gro_receive(&fp->napi, skb);
651 bd_cons = NEXT_RX_IDX(bd_cons);
652 bd_prod = NEXT_RX_IDX(bd_prod);
653 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
656 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
657 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
659 if (rx_pkt == budget)
663 fp->rx_bd_cons = bd_cons;
664 fp->rx_bd_prod = bd_prod_fw;
665 fp->rx_comp_cons = sw_comp_cons;
666 fp->rx_comp_prod = sw_comp_prod;
668 /* Update producers */
669 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
672 fp->rx_pkt += rx_pkt;
678 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
680 struct bnx2x_fastpath *fp = fp_cookie;
681 struct bnx2x *bp = fp->bp;
683 /* Return here if interrupt is disabled */
684 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
685 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
689 DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
690 fp->index, fp->sb_id);
691 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
693 #ifdef BNX2X_STOP_ON_ERROR
694 if (unlikely(bp->panic))
698 /* Handle Rx and Tx according to MSI-X vector */
699 prefetch(fp->rx_cons_sb);
700 prefetch(fp->tx_cons_sb);
701 prefetch(&fp->status_blk->u_status_block.status_block_index);
702 prefetch(&fp->status_blk->c_status_block.status_block_index);
703 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
709 /* HW Lock for shared dual port PHYs */
710 void bnx2x_acquire_phy_lock(struct bnx2x *bp)
712 mutex_lock(&bp->port.phy_mutex);
714 if (bp->port.need_hw_lock)
715 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
718 void bnx2x_release_phy_lock(struct bnx2x *bp)
720 if (bp->port.need_hw_lock)
721 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
723 mutex_unlock(&bp->port.phy_mutex);
726 void bnx2x_link_report(struct bnx2x *bp)
728 if (bp->flags & MF_FUNC_DIS) {
729 netif_carrier_off(bp->dev);
730 netdev_err(bp->dev, "NIC Link is Down\n");
734 if (bp->link_vars.link_up) {
737 if (bp->state == BNX2X_STATE_OPEN)
738 netif_carrier_on(bp->dev);
739 netdev_info(bp->dev, "NIC Link is Up, ");
741 line_speed = bp->link_vars.line_speed;
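/* In multi-function mode mf_config holds the per-function max bandwidth in
 * units of 100 Mbps (hence the multiplication); never report more than that.
 */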
746 ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
747 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
748 if (vn_max_rate < line_speed)
749 line_speed = vn_max_rate;
751 pr_cont("%d Mbps ", line_speed);
753 if (bp->link_vars.duplex == DUPLEX_FULL)
754 pr_cont("full duplex");
756 pr_cont("half duplex");
758 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
759 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
760 pr_cont(", receive ");
761 if (bp->link_vars.flow_ctrl &
763 pr_cont("& transmit ");
765 pr_cont(", transmit ");
767 pr_cont("flow control ON");
771 } else { /* link_down */
772 netif_carrier_off(bp->dev);
773 netdev_err(bp->dev, "NIC Link is Down\n");
777 void bnx2x_init_rx_rings(struct bnx2x *bp)
779 int func = BP_FUNC(bp);
780 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
781 ETH_MAX_AGGREGATION_QUEUES_E1H;
782 u16 ring_prod, cqe_ring_prod;
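/* Rx ring size: the user-configured value if set, otherwise an even split of
 * MAX_RX_AVAIL across the queues, but never less than MIN_RX_AVAIL.
 */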
784 int rx_ring_size = bp->rx_ring_size ? bp->rx_ring_size :
785 MAX_RX_AVAIL/bp->num_queues;
787 rx_ring_size = max_t(int, MIN_RX_AVAIL, rx_ring_size);
789 bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
791 "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
793 if (bp->flags & TPA_ENABLE_FLAG) {
795 for_each_queue(bp, j) {
796 struct bnx2x_fastpath *fp = &bp->fp[j];
798 for (i = 0; i < max_agg_queues; i++) {
799 fp->tpa_pool[i].skb =
800 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
801 if (!fp->tpa_pool[i].skb) {
802 BNX2X_ERR("Failed to allocate TPA "
803 "skb pool for queue[%d] - "
804 "disabling TPA on this "
806 bnx2x_free_tpa_pool(bp, fp, i);
810 dma_unmap_addr_set((struct sw_rx_bd *)
811 &bp->fp->tpa_pool[i],
813 fp->tpa_state[i] = BNX2X_TPA_STOP;
818 for_each_queue(bp, j) {
819 struct bnx2x_fastpath *fp = &bp->fp[j];
822 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
823 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
825 /* "next page" elements initialization */
827 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
828 struct eth_rx_sge *sge;
830 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
832 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
833 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
835 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
836 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
839 bnx2x_init_sge_ring_bit_mask(fp);
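/* Same idea for the RX BD and RCQ rings: the last entries of every page are
 * reserved to hold the DMA address of the following page, chaining the pages
 * into one logical ring (the "i % NUM_..." wraps the last page back to the
 * first).
 */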
842 for (i = 1; i <= NUM_RX_RINGS; i++) {
843 struct eth_rx_bd *rx_bd;
845 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
847 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
848 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
850 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
851 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
855 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
856 struct eth_rx_cqe_next_page *nextpg;
858 nextpg = (struct eth_rx_cqe_next_page *)
859 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
861 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
862 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
864 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
865 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
868 /* Allocate SGEs and initialize the ring elements */
869 for (i = 0, ring_prod = 0;
870 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
872 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
873 BNX2X_ERR("was only able to allocate "
875 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
876 /* Cleanup already allocated elements */
877 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
878 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
883 ring_prod = NEXT_SGE_IDX(ring_prod);
885 fp->rx_sge_prod = ring_prod;
887 /* Allocate BDs and initialize BD ring */
888 fp->rx_comp_cons = 0;
889 cqe_ring_prod = ring_prod = 0;
890 for (i = 0; i < rx_ring_size; i++) {
891 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
892 BNX2X_ERR("was only able to allocate "
893 "%d rx skbs on queue[%d]\n", i, j);
894 fp->eth_q_stats.rx_skb_alloc_failed++;
897 ring_prod = NEXT_RX_IDX(ring_prod);
898 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
899 WARN_ON(ring_prod <= i);
902 fp->rx_bd_prod = ring_prod;
903 /* must not have more available CQEs than BDs */
904 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
906 fp->rx_pkt = fp->rx_calls = 0;
909 * this will generate an interrupt (to the TSTORM)
910 * must only be done after chip is initialized
912 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
917 REG_WR(bp, BAR_USTRORM_INTMEM +
918 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
919 U64_LO(fp->rx_comp_mapping));
920 REG_WR(bp, BAR_USTRORM_INTMEM +
921 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
922 U64_HI(fp->rx_comp_mapping));
925 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
929 for_each_queue(bp, i) {
930 struct bnx2x_fastpath *fp = &bp->fp[i];
932 u16 bd_cons = fp->tx_bd_cons;
933 u16 sw_prod = fp->tx_pkt_prod;
934 u16 sw_cons = fp->tx_pkt_cons;
936 while (sw_cons != sw_prod) {
937 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
943 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
947 for_each_queue(bp, j) {
948 struct bnx2x_fastpath *fp = &bp->fp[j];
950 for (i = 0; i < NUM_RX_BD; i++) {
951 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
952 struct sk_buff *skb = rx_buf->skb;
957 dma_unmap_single(&bp->pdev->dev,
958 dma_unmap_addr(rx_buf, mapping),
959 bp->rx_buf_size, DMA_FROM_DEVICE);
964 if (!fp->disable_tpa)
965 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
966 ETH_MAX_AGGREGATION_QUEUES_E1 :
967 ETH_MAX_AGGREGATION_QUEUES_E1H);
971 void bnx2x_free_skbs(struct bnx2x *bp)
973 bnx2x_free_tx_skbs(bp);
974 bnx2x_free_rx_skbs(bp);
977 static void bnx2x_free_msix_irqs(struct bnx2x *bp)
981 free_irq(bp->msix_table[0].vector, bp->dev);
982 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
983 bp->msix_table[0].vector);
988 for_each_queue(bp, i) {
989 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
990 "state %x\n", i, bp->msix_table[i + offset].vector,
991 bnx2x_fp(bp, i, state));
993 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
997 void bnx2x_free_irq(struct bnx2x *bp, bool disable_only)
999 if (bp->flags & USING_MSIX_FLAG) {
1001 bnx2x_free_msix_irqs(bp);
1002 pci_disable_msix(bp->pdev);
1003 bp->flags &= ~USING_MSIX_FLAG;
1005 } else if (bp->flags & USING_MSI_FLAG) {
1007 free_irq(bp->pdev->irq, bp->dev);
1008 pci_disable_msi(bp->pdev);
1009 bp->flags &= ~USING_MSI_FLAG;
1011 } else if (!disable_only)
1012 free_irq(bp->pdev->irq, bp->dev);
1015 static int bnx2x_enable_msix(struct bnx2x *bp)
1017 int i, rc, offset = 1;
1020 bp->msix_table[0].entry = igu_vec;
1021 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
1024 igu_vec = BP_L_ID(bp) + offset;
1025 bp->msix_table[1].entry = igu_vec;
1026 DP(NETIF_MSG_IFUP, "msix_table[1].entry = %d (CNIC)\n", igu_vec);
1029 for_each_queue(bp, i) {
1030 igu_vec = BP_L_ID(bp) + offset + i;
1031 bp->msix_table[i + offset].entry = igu_vec;
1032 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
1033 "(fastpath #%u)\n", i + offset, igu_vec, i);
1036 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
1037 BNX2X_NUM_QUEUES(bp) + offset);
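/* pci_enable_msix() returns 0 on success, a negative errno on failure, or,
 * if the request was too big, a positive number of vectors that could be
 * allocated instead.
 */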
1040 * reconfigure number of tx/rx queues according to available
1043 if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
1044 /* vectors available for FP */
1045 int fp_vec = rc - BNX2X_MSIX_VEC_FP_START;
1048 "Trying to use less MSI-X vectors: %d\n", rc);
1050 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
1054 "MSI-X is not attainable rc %d\n", rc);
1058 bp->num_queues = min(bp->num_queues, fp_vec);
1060 DP(NETIF_MSG_IFUP, "New queue configuration set: %d\n",
1063 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
1067 bp->flags |= USING_MSIX_FLAG;
1072 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1074 int i, rc, offset = 1;
1076 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
1077 bp->dev->name, bp->dev);
1079 BNX2X_ERR("request sp irq failed\n");
1086 for_each_queue(bp, i) {
1087 struct bnx2x_fastpath *fp = &bp->fp[i];
1088 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1091 rc = request_irq(bp->msix_table[i + offset].vector,
1092 bnx2x_msix_fp_int, 0, fp->name, fp);
1094 BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
1095 bnx2x_free_msix_irqs(bp);
1099 fp->state = BNX2X_FP_STATE_IRQ;
1102 i = BNX2X_NUM_QUEUES(bp);
1103 netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d"
1105 bp->msix_table[0].vector,
1106 0, bp->msix_table[offset].vector,
1107 i - 1, bp->msix_table[offset + i - 1].vector);
1112 static int bnx2x_enable_msi(struct bnx2x *bp)
1116 rc = pci_enable_msi(bp->pdev);
1118 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
1121 bp->flags |= USING_MSI_FLAG;
1126 static int bnx2x_req_irq(struct bnx2x *bp)
1128 unsigned long flags;
1131 if (bp->flags & USING_MSI_FLAG)
1134 flags = IRQF_SHARED;
1136 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
1137 bp->dev->name, bp->dev);
1139 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
1144 static void bnx2x_napi_enable(struct bnx2x *bp)
1148 for_each_queue(bp, i)
1149 napi_enable(&bnx2x_fp(bp, i, napi));
1152 static void bnx2x_napi_disable(struct bnx2x *bp)
1156 for_each_queue(bp, i)
1157 napi_disable(&bnx2x_fp(bp, i, napi));
1160 void bnx2x_netif_start(struct bnx2x *bp)
1164 intr_sem = atomic_dec_and_test(&bp->intr_sem);
1165 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
1168 if (netif_running(bp->dev)) {
1169 bnx2x_napi_enable(bp);
1170 bnx2x_int_enable(bp);
1171 if (bp->state == BNX2X_STATE_OPEN)
1172 netif_tx_wake_all_queues(bp->dev);
1177 void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1179 bnx2x_int_disable_sync(bp, disable_hw);
1180 bnx2x_napi_disable(bp);
1181 netif_tx_disable(bp->dev);
1183 static int bnx2x_set_num_queues(struct bnx2x *bp)
1187 switch (bp->int_mode) {
1189 bnx2x_enable_msi(bp);
1190 /* falling through... */
1193 DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
1196 /* Set number of queues according to bp->multi_mode value */
1197 bnx2x_set_num_queues_msix(bp);
1199 DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
1202 /* if we can't use MSI-X we only need one fp,
1203 * so try to enable MSI-X with the requested number of fp's
1204 * and fall back to MSI or legacy INTx with one fp
1206 rc = bnx2x_enable_msix(bp);
1208 /* failed to enable MSI-X */
1211 /* Fall back to INTx if we failed to enable MSI-X due to lack of
1212 * memory (in bnx2x_set_num_queues()) */
1213 if ((rc != -ENOMEM) && (bp->int_mode != INT_MODE_INTx))
1214 bnx2x_enable_msi(bp);
1219 netif_set_real_num_tx_queues(bp->dev, bp->num_queues);
1220 return netif_set_real_num_rx_queues(bp->dev, bp->num_queues);
1223 static void bnx2x_release_firmware(struct bnx2x *bp)
1225 kfree(bp->init_ops_offsets);
1226 kfree(bp->init_ops);
1227 kfree(bp->init_data);
1228 release_firmware(bp->firmware);
1231 /* must be called with rtnl_lock */
1232 int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
1237 /* Set init arrays */
1238 rc = bnx2x_init_firmware(bp);
1240 BNX2X_ERR("Error loading firmware\n");
1244 #ifdef BNX2X_STOP_ON_ERROR
1245 if (unlikely(bp->panic))
1249 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
1251 rc = bnx2x_set_num_queues(bp);
1255 if (bnx2x_alloc_mem(bp)) {
1256 bnx2x_free_irq(bp, true);
1260 for_each_queue(bp, i)
1261 bnx2x_fp(bp, i, disable_tpa) =
1262 ((bp->flags & TPA_ENABLE_FLAG) == 0);
1264 for_each_queue(bp, i)
1265 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
1268 bnx2x_napi_enable(bp);
1270 if (bp->flags & USING_MSIX_FLAG) {
1271 rc = bnx2x_req_msix_irqs(bp);
1273 bnx2x_free_irq(bp, true);
1278 rc = bnx2x_req_irq(bp);
1280 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
1281 bnx2x_free_irq(bp, true);
1284 if (bp->flags & USING_MSI_FLAG) {
1285 bp->dev->irq = bp->pdev->irq;
1286 netdev_info(bp->dev, "using MSI IRQ %d\n",
1291 /* Send LOAD_REQUEST command to MCP
1292 The MCP returns the type of LOAD command:
1293 if this is the first port to be initialized,
1294 the common blocks should be initialized as well; otherwise not
1296 if (!BP_NOMCP(bp)) {
1297 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0);
1299 BNX2X_ERR("MCP response failure, aborting\n");
1303 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
1304 rc = -EBUSY; /* other port in diagnostic mode */
1309 int port = BP_PORT(bp);
1311 DP(NETIF_MSG_IFUP, "NO MCP - load counts %d, %d, %d\n",
1312 load_count[0], load_count[1], load_count[2]);
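/* Without an MCP the driver does the load accounting itself:
 * load_count[0] counts functions on the whole chip, load_count[1 + port]
 * counts functions on this port; the first function up does the COMMON
 * init, the first one on a port does the PORT init, all others do
 * FUNCTION-only init.
 */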
1314 load_count[1 + port]++;
1315 DP(NETIF_MSG_IFUP, "NO MCP - new load counts %d, %d, %d\n",
1316 load_count[0], load_count[1], load_count[2]);
1317 if (load_count[0] == 1)
1318 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
1319 else if (load_count[1 + port] == 1)
1320 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
1322 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
1325 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
1326 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
1330 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
1333 rc = bnx2x_init_hw(bp, load_code);
1335 BNX2X_ERR("HW init failed, aborting\n");
1336 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
1337 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
1338 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
1342 /* Setup NIC internals and enable interrupts */
1343 bnx2x_nic_init(bp, load_code);
1345 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) &&
1346 (bp->common.shmem2_base))
1347 SHMEM2_WR(bp, dcc_support,
1348 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
1349 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
1351 /* Send LOAD_DONE command to MCP */
1352 if (!BP_NOMCP(bp)) {
1353 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
1355 BNX2X_ERR("MCP response failure, aborting\n");
1361 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
1363 rc = bnx2x_setup_leading(bp);
1365 BNX2X_ERR("Setup leading failed!\n");
1366 #ifndef BNX2X_STOP_ON_ERROR
1374 if (CHIP_IS_E1H(bp))
1375 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
1376 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
1377 bp->flags |= MF_FUNC_DIS;
1380 if (bp->state == BNX2X_STATE_OPEN) {
1382 /* Enable Timer scan */
1383 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
1385 for_each_nondefault_queue(bp, i) {
1386 rc = bnx2x_setup_multi(bp, i);
1396 bnx2x_set_eth_mac_addr_e1(bp, 1);
1398 bnx2x_set_eth_mac_addr_e1h(bp, 1);
1400 /* Set iSCSI L2 MAC */
1401 mutex_lock(&bp->cnic_mutex);
1402 if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD) {
1403 bnx2x_set_iscsi_eth_mac_addr(bp, 1);
1404 bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
1405 bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping,
1408 mutex_unlock(&bp->cnic_mutex);
1413 bnx2x_initial_phy_init(bp, load_mode);
1415 /* Start fast path */
1416 switch (load_mode) {
1418 if (bp->state == BNX2X_STATE_OPEN) {
1419 /* Tx queues should only be re-enabled */
1420 netif_tx_wake_all_queues(bp->dev);
1422 /* Initialize the receive filter. */
1423 bnx2x_set_rx_mode(bp->dev);
1427 netif_tx_start_all_queues(bp->dev);
1428 if (bp->state != BNX2X_STATE_OPEN)
1429 netif_tx_disable(bp->dev);
1430 /* Initialize the receive filter. */
1431 bnx2x_set_rx_mode(bp->dev);
1435 /* Initialize the receive filter. */
1436 bnx2x_set_rx_mode(bp->dev);
1437 bp->state = BNX2X_STATE_DIAG;
1445 bnx2x__link_status_update(bp);
1447 /* start the timer */
1448 mod_timer(&bp->timer, jiffies + bp->current_interval);
1451 bnx2x_setup_cnic_irq_info(bp);
1452 if (bp->state == BNX2X_STATE_OPEN)
1453 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
1455 bnx2x_inc_load_cnt(bp);
1457 bnx2x_release_firmware(bp);
1463 /* Disable Timer scan */
1464 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0);
1467 bnx2x_int_disable_sync(bp, 1);
1468 if (!BP_NOMCP(bp)) {
1469 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
1470 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
1473 /* Free SKBs, SGEs, TPA pool and driver internals */
1474 bnx2x_free_skbs(bp);
1475 for_each_queue(bp, i)
1476 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
1479 bnx2x_free_irq(bp, false);
1481 bnx2x_napi_disable(bp);
1482 for_each_queue(bp, i)
1483 netif_napi_del(&bnx2x_fp(bp, i, napi));
1486 bnx2x_release_firmware(bp);
1491 /* must be called with rtnl_lock */
1492 int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
1496 if (bp->state == BNX2X_STATE_CLOSED) {
1497 /* Interface has been removed - nothing to recover */
1498 bp->recovery_state = BNX2X_RECOVERY_DONE;
1500 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
1507 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
1509 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
1511 /* Set "drop all" */
1512 bp->rx_mode = BNX2X_RX_MODE_NONE;
1513 bnx2x_set_storm_rx_mode(bp);
1515 /* Disable HW interrupts, NAPI and Tx */
1516 bnx2x_netif_stop(bp, 1);
1517 netif_carrier_off(bp->dev);
1519 del_timer_sync(&bp->timer);
1520 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
1521 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
1522 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
1525 bnx2x_free_irq(bp, false);
1527 /* Cleanup the chip if needed */
1528 if (unload_mode != UNLOAD_RECOVERY)
1529 bnx2x_chip_cleanup(bp, unload_mode);
1533 /* Free SKBs, SGEs, TPA pool and driver internals */
1534 bnx2x_free_skbs(bp);
1535 for_each_queue(bp, i)
1536 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
1537 for_each_queue(bp, i)
1538 netif_napi_del(&bnx2x_fp(bp, i, napi));
1541 bp->state = BNX2X_STATE_CLOSED;
1543 /* The last driver to unload must disable the "close the gate" if there is no
1544 * parity attention or "process kill" pending.
1546 if ((!bnx2x_dec_load_cnt(bp)) && (!bnx2x_chk_parity_attn(bp)) &&
1547 bnx2x_reset_is_done(bp))
1548 bnx2x_disable_close_the_gate(bp);
1550 /* Reset the MCP mailbox sequence if there is an ongoing recovery */
1551 if (unload_mode == UNLOAD_RECOVERY)
1556 int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
1560 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
1564 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
1565 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
1566 PCI_PM_CTRL_PME_STATUS));
1568 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
1569 /* delay required during transition out of D3hot */
1574 /* If there are other clients above, don't
1575 shut down the power */
1576 if (atomic_read(&bp->pdev->enable_cnt) != 1)
1578 /* Don't shut down the power for emulation and FPGA */
1579 if (CHIP_REV_IS_SLOW(bp))
1582 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
1586 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
1588 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
1591 /* No more memory access after this point until
1592 * device is brought back to D0.
1605 * net_device service functions
1608 static int bnx2x_poll(struct napi_struct *napi, int budget)
1611 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
1613 struct bnx2x *bp = fp->bp;
1616 #ifdef BNX2X_STOP_ON_ERROR
1617 if (unlikely(bp->panic)) {
1618 napi_complete(napi);
1623 if (bnx2x_has_tx_work(fp))
1626 if (bnx2x_has_rx_work(fp)) {
1627 work_done += bnx2x_rx_int(fp, budget - work_done);
1629 /* must not complete if we consumed full budget */
1630 if (work_done >= budget)
1634 /* Fall out from the NAPI loop if needed */
1635 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
1636 bnx2x_update_fpsb_idx(fp);
1637 /* bnx2x_has_rx_work() reads the status block, thus we need
1638 * to ensure that status block indices have been actually read
1639 * (bnx2x_update_fpsb_idx) prior to this check
1640 * (bnx2x_has_rx_work) so that we won't write the "newer"
1641 * value of the status block to IGU (if there was a DMA right
1642 * after bnx2x_has_rx_work and if there is no rmb, the memory
1643 * reading (bnx2x_update_fpsb_idx) may be postponed to right
1644 * before bnx2x_ack_sb). In this case there will never be
1645 * another interrupt until there is another update of the
1646 * status block, while there is still unhandled work.
1650 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
1651 napi_complete(napi);
1652 /* Re-enable interrupts */
1653 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
1654 le16_to_cpu(fp->fp_c_idx),
1656 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
1657 le16_to_cpu(fp->fp_u_idx),
1668 /* we split the first BD into headers and data BDs
1669 * to ease the pain of our fellow microcode engineers;
1670 * we use one mapping for both BDs.
1671 * So far this has only been observed to happen
1672 * in Other Operating Systems(TM)
1674 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
1675 struct bnx2x_fastpath *fp,
1676 struct sw_tx_bd *tx_buf,
1677 struct eth_tx_start_bd **tx_bd, u16 hlen,
1678 u16 bd_prod, int nbd)
1680 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
1681 struct eth_tx_bd *d_tx_bd;
1683 int old_len = le16_to_cpu(h_tx_bd->nbytes);
1685 /* first fix first BD */
1686 h_tx_bd->nbd = cpu_to_le16(nbd);
1687 h_tx_bd->nbytes = cpu_to_le16(hlen);
1689 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
1690 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
1691 h_tx_bd->addr_lo, h_tx_bd->nbd);
1693 /* now get a new data BD
1694 * (after the pbd) and fill it */
1695 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
1696 d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
1698 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
1699 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
1701 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1702 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1703 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
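/* Note: the data BD reuses the start BD's DMA mapping, just offset by hlen,
 * so the two BDs together cover exactly the original linear data.
 */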
1705 /* this marks the BD as one that has no individual mapping */
1706 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
1708 DP(NETIF_MSG_TX_QUEUED,
1709 "TSO split data size is %d (%x:%x)\n",
1710 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
1713 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
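/* HW checksum bug workaround (see the caller's comment): adjust the pseudo
 * checksum by the partial checksum of the 'fix' bytes next to the transport
 * header and return it byte-swapped, as the parse BD expects.
 */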
1718 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
1721 csum = (u16) ~csum_fold(csum_sub(csum,
1722 csum_partial(t_header - fix, fix, 0)));
1725 csum = (u16) ~csum_fold(csum_add(csum,
1726 csum_partial(t_header, -fix, 0)));
1728 return swab16(csum);
1731 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
1735 if (skb->ip_summed != CHECKSUM_PARTIAL)
1739 if (skb->protocol == htons(ETH_P_IPV6)) {
1741 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
1742 rc |= XMIT_CSUM_TCP;
1746 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
1747 rc |= XMIT_CSUM_TCP;
1751 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
1752 rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP);
1754 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
1755 rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6);
1760 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
1761 /* Check if the packet requires linearization (i.e. it is too fragmented).
1762 No need to check fragmentation if the page size > 8K (there will be no
1763 violation of the FW restrictions) */
1764 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
1769 int first_bd_sz = 0;
1771 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
1772 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
1774 if (xmit_type & XMIT_GSO) {
1775 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
1776 /* Check if LSO packet needs to be copied:
1777 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
1778 int wnd_size = MAX_FETCH_BD - 3;
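/* The check below slides a window of wnd_size frags over the skb and
 * requires every window to carry at least one MSS worth of data; otherwise
 * a single segment could span more BDs than the FW can fetch and the skb
 * must be linearized.
 */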
1779 /* Number of windows to check */
1780 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
1785 /* Headers length */
1786 hlen = (int)(skb_transport_header(skb) - skb->data) +
1789 /* Amount of data (w/o headers) on linear part of SKB*/
1790 first_bd_sz = skb_headlen(skb) - hlen;
1792 wnd_sum = first_bd_sz;
1794 /* Calculate the first sum - it's special */
1795 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
1797 skb_shinfo(skb)->frags[frag_idx].size;
1799 /* If there was data in the linear part of the skb - check it */
1800 if (first_bd_sz > 0) {
1801 if (unlikely(wnd_sum < lso_mss)) {
1806 wnd_sum -= first_bd_sz;
1809 /* Others are easier: run through the frag list and
1810 check all windows */
1811 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
1813 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
1815 if (unlikely(wnd_sum < lso_mss)) {
1820 skb_shinfo(skb)->frags[wnd_idx].size;
1823 /* in non-LSO too fragmented packet should always
1830 if (unlikely(to_copy))
1831 DP(NETIF_MSG_TX_QUEUED,
1832 "Linearization IS REQUIRED for %s packet. "
1833 "num_frags %d hlen %d first_bd_sz %d\n",
1834 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
1835 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
1841 /* called with netif_tx_lock
1842 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
1843 * netif_wake_queue()
1845 netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
1847 struct bnx2x *bp = netdev_priv(dev);
1848 struct bnx2x_fastpath *fp;
1849 struct netdev_queue *txq;
1850 struct sw_tx_bd *tx_buf;
1851 struct eth_tx_start_bd *tx_start_bd;
1852 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
1853 struct eth_tx_parse_bd *pbd = NULL;
1854 u16 pkt_prod, bd_prod;
1857 u32 xmit_type = bnx2x_xmit_type(bp, skb);
1860 __le16 pkt_size = 0;
1862 u8 mac_type = UNICAST_ADDRESS;
1864 #ifdef BNX2X_STOP_ON_ERROR
1865 if (unlikely(bp->panic))
1866 return NETDEV_TX_BUSY;
1869 fp_index = skb_get_queue_mapping(skb);
1870 txq = netdev_get_tx_queue(dev, fp_index);
1872 fp = &bp->fp[fp_index];
1874 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
1875 fp->eth_q_stats.driver_xoff++;
1876 netif_tx_stop_queue(txq);
1877 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
1878 return NETDEV_TX_BUSY;
1881 DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
1882 " gso type %x xmit_type %x\n",
1883 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
1884 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
1886 eth = (struct ethhdr *)skb->data;
1888 /* set flag according to packet type (UNICAST_ADDRESS is default)*/
1889 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
1890 if (is_broadcast_ether_addr(eth->h_dest))
1891 mac_type = BROADCAST_ADDRESS;
1893 mac_type = MULTICAST_ADDRESS;
1896 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
1897 /* First, check if we need to linearize the skb (due to FW
1898 restrictions). No need to check fragmentation if page size > 8K
1899 (there will be no violation of the FW restrictions) */
1900 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
1901 /* Statistics of linearization */
1903 if (skb_linearize(skb) != 0) {
1904 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
1905 "silently dropping this SKB\n");
1906 dev_kfree_skb_any(skb);
1907 return NETDEV_TX_OK;
1913 Please read carefully. First we use one BD which we mark as start,
1914 then we have a parsing info BD (used for TSO or xsum),
1915 and only then we have the rest of the TSO BDs.
1916 (don't forget to mark the last one as last,
1917 and to unmap only AFTER you write to the BD ...)
1918 And above all, all pbd sizes are in words - NOT DWORDS!
1921 pkt_prod = fp->tx_pkt_prod++;
1922 bd_prod = TX_BD(fp->tx_bd_prod);
1924 /* get a tx_buf and first BD */
1925 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
1926 tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
1928 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
1929 tx_start_bd->general_data = (mac_type <<
1930 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
1932 tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
1934 /* remember the first BD of the packet */
1935 tx_buf->first_bd = fp->tx_bd_prod;
1939 DP(NETIF_MSG_TX_QUEUED,
1940 "sending pkt %u @%p next_idx %u bd %u @%p\n",
1941 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
1944 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
1945 (bp->flags & HW_VLAN_TX_FLAG)) {
1946 tx_start_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
1947 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
1950 tx_start_bd->vlan = cpu_to_le16(pkt_prod);
1952 /* turn on parsing and get a BD */
1953 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
1954 pbd = &fp->tx_desc_ring[bd_prod].parse_bd;
1956 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
1958 if (xmit_type & XMIT_CSUM) {
1959 hlen = (skb_network_header(skb) - skb->data) / 2;
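/* (the parse BD keeps all header lengths in 16-bit words, hence the
 *  divisions by 2 here and below)
 */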
1961 /* for now NS flag is not used in Linux */
1963 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
1964 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
1966 pbd->ip_hlen = (skb_transport_header(skb) -
1967 skb_network_header(skb)) / 2;
1969 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
1971 pbd->total_hlen = cpu_to_le16(hlen);
1974 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
1976 if (xmit_type & XMIT_CSUM_V4)
1977 tx_start_bd->bd_flags.as_bitfield |=
1978 ETH_TX_BD_FLAGS_IP_CSUM;
1980 tx_start_bd->bd_flags.as_bitfield |=
1981 ETH_TX_BD_FLAGS_IPV6;
1983 if (xmit_type & XMIT_CSUM_TCP) {
1984 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
1987 s8 fix = SKB_CS_OFF(skb); /* signed! */
1989 pbd->global_data |= ETH_TX_PARSE_BD_UDP_CS_FLG;
1991 DP(NETIF_MSG_TX_QUEUED,
1992 "hlen %d fix %d csum before fix %x\n",
1993 le16_to_cpu(pbd->total_hlen), fix, SKB_CS(skb));
1995 /* HW bug: fixup the CSUM */
1996 pbd->tcp_pseudo_csum =
1997 bnx2x_csum_fix(skb_transport_header(skb),
2000 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
2001 pbd->tcp_pseudo_csum);
2005 mapping = dma_map_single(&bp->pdev->dev, skb->data,
2006 skb_headlen(skb), DMA_TO_DEVICE);
2008 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2009 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
2010 nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
2011 tx_start_bd->nbd = cpu_to_le16(nbd);
2012 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
2013 pkt_size = tx_start_bd->nbytes;
2015 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
2016 " nbytes %d flags %x vlan %x\n",
2017 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
2018 le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
2019 tx_start_bd->bd_flags.as_bitfield, le16_to_cpu(tx_start_bd->vlan));
2021 if (xmit_type & XMIT_GSO) {
2023 DP(NETIF_MSG_TX_QUEUED,
2024 "TSO packet len %d hlen %d total len %d tso size %d\n",
2025 skb->len, hlen, skb_headlen(skb),
2026 skb_shinfo(skb)->gso_size);
2028 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
2030 if (unlikely(skb_headlen(skb) > hlen))
2031 bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
2032 hlen, bd_prod, ++nbd);
2034 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2035 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
2036 pbd->tcp_flags = pbd_tcp_flags(skb);
2038 if (xmit_type & XMIT_GSO_V4) {
2039 pbd->ip_id = swab16(ip_hdr(skb)->id);
2040 pbd->tcp_pseudo_csum =
2041 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
2043 0, IPPROTO_TCP, 0));
2046 pbd->tcp_pseudo_csum =
2047 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2048 &ipv6_hdr(skb)->daddr,
2049 0, IPPROTO_TCP, 0));
2051 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
2053 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
2055 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2056 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2058 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2059 tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
2060 if (total_pkt_bd == NULL)
2061 total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
2063 mapping = dma_map_page(&bp->pdev->dev, frag->page,
2065 frag->size, DMA_TO_DEVICE);
2067 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2068 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
2069 tx_data_bd->nbytes = cpu_to_le16(frag->size);
2070 le16_add_cpu(&pkt_size, frag->size);
2072 DP(NETIF_MSG_TX_QUEUED,
2073 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
2074 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
2075 le16_to_cpu(tx_data_bd->nbytes));
2078 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
2080 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2082 /* now send a tx doorbell, counting the next BD
2083 * if the packet contains or ends with it
2085 if (TX_BD_POFF(bd_prod) < nbd)
2088 if (total_pkt_bd != NULL)
2089 total_pkt_bd->total_pkt_bytes = pkt_size;
2092 DP(NETIF_MSG_TX_QUEUED,
2093 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
2094 " tcp_flags %x xsum %x seq %u hlen %u\n",
2095 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
2096 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
2097 pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
2099 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
2102 * Make sure that the BD data is updated before updating the producer
2103 * since FW might read the BD right after the producer is updated.
2104 * This is only applicable for weak-ordered memory model archs such
2106 * as IA-64. The following barrier is also mandatory since FW
2106 * assumes packets must have BDs.
2110 fp->tx_db.data.prod += nbd;
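/* Ring the doorbell with the updated BD producer so the chip starts
 * fetching the newly posted BDs.
 */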
2112 DOORBELL(bp, fp->index, fp->tx_db.raw);
2116 fp->tx_bd_prod += nbd;
2118 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
2119 netif_tx_stop_queue(txq);
2121 /* paired memory barrier is in bnx2x_tx_int(), we have to keep
2122 * ordering of set_bit() in netif_tx_stop_queue() and read of
2126 fp->eth_q_stats.driver_xoff++;
2127 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
2128 netif_tx_wake_queue(txq);
2132 return NETDEV_TX_OK;
2134 /* called with rtnl_lock */
2135 int bnx2x_change_mac_addr(struct net_device *dev, void *p)
2137 struct sockaddr *addr = p;
2138 struct bnx2x *bp = netdev_priv(dev);
2140 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
2143 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2144 if (netif_running(dev)) {
2146 bnx2x_set_eth_mac_addr_e1(bp, 1);
2148 bnx2x_set_eth_mac_addr_e1h(bp, 1);
2154 /* called with rtnl_lock */
2155 int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
2157 struct bnx2x *bp = netdev_priv(dev);
2160 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
2161 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
2165 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
2166 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
2169 /* This does not race with packet allocation
2170 * because the actual alloc size is
2171 * only updated as part of load
2175 if (netif_running(dev)) {
2176 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
2177 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
2183 void bnx2x_tx_timeout(struct net_device *dev)
2185 struct bnx2x *bp = netdev_priv(dev);
2187 #ifdef BNX2X_STOP_ON_ERROR
2191 /* This allows the netif to be shut down gracefully before resetting */
2192 schedule_delayed_work(&bp->reset_task, 0);
2196 /* called with rtnl_lock */
2197 void bnx2x_vlan_rx_register(struct net_device *dev,
2198 struct vlan_group *vlgrp)
2200 struct bnx2x *bp = netdev_priv(dev);
2204 /* Set flags according to the required capabilities */
2205 bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
2207 if (dev->features & NETIF_F_HW_VLAN_TX)
2208 bp->flags |= HW_VLAN_TX_FLAG;
2210 if (dev->features & NETIF_F_HW_VLAN_RX)
2211 bp->flags |= HW_VLAN_RX_FLAG;
2213 if (netif_running(dev))
2214 bnx2x_set_client_config(bp);
2218 int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
2220 struct net_device *dev = pci_get_drvdata(pdev);
2224 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
2227 bp = netdev_priv(dev);
2231 pci_save_state(pdev);
2233 if (!netif_running(dev)) {
2238 netif_device_detach(dev);
2240 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
2242 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
2249 int bnx2x_resume(struct pci_dev *pdev)
2251 struct net_device *dev = pci_get_drvdata(pdev);
2256 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
2259 bp = netdev_priv(dev);
2261 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
2262 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
2268 pci_restore_state(pdev);
2270 if (!netif_running(dev)) {
2275 bnx2x_set_power_state(bp, PCI_D0);
2276 netif_device_attach(dev);
2278 rc = bnx2x_nic_load(bp, LOAD_OPEN);