/* bnx2x_cmn.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2010 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 */
#include <linux/etherdevice.h>
#include <linux/ipv6.h>
#include <net/ip6_checksum.h>
#include <linux/firmware.h>
#include "bnx2x_cmn.h"

#include <linux/if_vlan.h>

#include "bnx2x_init.h"
static int bnx2x_poll(struct napi_struct *napi, int budget);
/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	/* prefetch skb end pointer to speedup dev_kfree_skb() */
	prefetch(&skb->end);

	DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
	   idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
	dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
			 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);

	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
	if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif
	new_cons = nbd + tx_buf->first_bd;

	/* Get the next bd */
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* Skip a parse bd... */
	--nbd;
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* ...and the TSO split header bd since they have no mapping */
	if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
		--nbd;
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* now free frags */
	while (nbd > 0) {

		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
		dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
			       BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	dev_kfree_skb(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}
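/* Illustrative BD accounting for the function above (a sketch, not driver
 * code): a TSO packet with two page frags occupies
 *
 *   start BD (mapped, linear data) -> parse BD (no mapping) ->
 *   [TSO split header BD, shares the start BD mapping] -> 2 frag BDs
 *
 * tx_start_bd->nbd counts all of these, so after unmapping the start BD
 * and decrementing past the parse BD (and the split BD when present),
 * exactly the dma_map_page()'d frag BDs remain for the unmap loop.
 */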
int bnx2x_tx_int(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -1;
#endif

	txq = netdev_get_tx_queue(bp->dev, fp->index);
	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

		DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
		   hw_cons, sw_cons, pkt_cons);

/*		if (NEXT_TX_IDX(sw_cons) != hw_cons) {
			rmb();
			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
		}
*/
		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* Need to make the tx_bd_cons update visible to start_xmit()
	 * before checking for netif_tx_queue_stopped(). Without the
	 * memory barrier, there is a small possibility that
	 * start_xmit() will miss it and cause the queue to be stopped
	 * forever.
	 */
	smp_mb();

	/* TBD need a thresh? */
	if (unlikely(netif_tx_queue_stopped(txq))) {
		/* Taking tx_lock() is needed to prevent reenabling the queue
		 * while it's empty. This could have happened if rx_action()
		 * gets suspended in bnx2x_tx_int() after the condition before
		 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
		 *
		 * stops the queue->sees fresh tx_bd_cons->releases the queue->
		 * sends some packets consuming the whole queue again->
		 * stops the queue
		 */

		__netif_tx_lock(txq, smp_processor_id());

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_tx_wake_queue(txq);

		__netif_tx_unlock(txq);
	}
	return 0;
}
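/* Race sketch for the locking above (illustrative): without the tx lock,
 * this path could observe a stopped queue that bnx2x_start_xmit() has
 * meanwhile refilled and stopped again, and wake it while it is full.
 * Re-checking the stop condition and the available ring space under
 * __netif_tx_lock() closes that window.
 */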
static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}
static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
		      SGE_PAGE_SHIFT;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		SGE_MASK_CLEAR_BIT(fp,
			RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp,
		le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
		delta += RX_SGE_MASK_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}
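/* Numeric sketch (illustrative, assuming 64-bit sge_mask elements): a CQE
 * whose SGL releases indices 0..127 clears two whole mask elements; the
 * loop above then advances rx_sge_prod by two elements' worth of entries
 * in one step, stopping at the first element that still has in-use pages.
 */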
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;

	/* move empty skb from pool to prod and map it */
	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
	mapping = dma_map_single(&bp->pdev->dev, fp->tpa_pool[queue].skb->data,
				 bp->rx_buf_size, DMA_FROM_DEVICE);
	dma_unmap_addr_set(prod_rx_buf, mapping, mapping);

	/* move partial skb from cons to pool (don't unmap yet) */
	fp->tpa_pool[queue] = *cons_rx_buf;

	/* mark bin state as start - print error if current state != stop */
	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	fp->tpa_state[queue] = BNX2X_TPA_START;

	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef _ASM_GENERIC_INT_L64_H
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
	   fp->tpa_queue_used);
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
	   fp->tpa_queue_used);
#endif
#endif
}
static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_cqe *fp_cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;

	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
	pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
					       max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
			  fp_cqe->pkt_len, len_on_bd);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx =
			RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			fp->eth_q_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we are going to pass it to the stack */
		dma_unmap_page(&bp->pdev->dev,
			       dma_unmap_addr(&old_rx_pg, mapping),
			       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}
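/* Example of the gso_size choice above (illustrative, assuming 4K pages):
 * for a 26000-byte aggregation with len_on_bd = 1500, frag_size is 24500
 * and gso_size becomes min(4096, max(24500, 1500)) = 4096, so the stack
 * can re-segment the LRO-merged skb when forwarding it.
 */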
static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
	struct sk_buff *skb = rx_buf->skb;
	/* alloc new skb */
	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
			 bp->rx_buf_size, DMA_FROM_DEVICE);

	if (likely(new_skb)) {
		/* fix ip xsum and give it to the stack */
		/* (no need to map the new skb) */
#ifdef BCM_VLAN
		int is_vlan_cqe =
			(le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
			 PARSING_FLAGS_VLAN);
		int is_not_hwaccel_vlan_cqe =
			(is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
#endif

		prefetch(skb);
		prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > bp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail... "
				  "pad %d len %d rx_buf_size %d\n",
				  pad, len, bp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad);
		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		{
			struct iphdr *iph;

			iph = (struct iphdr *)skb->data;
#ifdef BCM_VLAN
			/* If there is no Rx VLAN offloading -
			   take VLAN tag into an account */
			if (unlikely(is_not_hwaccel_vlan_cqe))
				iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
#endif
			iph->check = 0;
			iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
		}

		if (!bnx2x_fill_frag_skb(bp, fp, skb,
					 &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
			if ((bp->vlgrp != NULL) &&
			    (le16_to_cpu(cqe->fast_path_cqe.
			     pars_flags.flags) & PARSING_FLAGS_VLAN))
				vlan_gro_receive(&fp->napi, bp->vlgrp,
						 le16_to_cpu(cqe->fast_path_cqe.
							     vlan_tag), skb);
			else
#endif
				napi_gro_receive(&fp->napi, skb);
		} else {
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");
			dev_kfree_skb(skb);
		}

		/* put new skb in bin */
		fp->tpa_pool[queue].skb = new_skb;

	} else {
		/* else drop the packet and keep the buffer in the bin */
		DP(NETIF_MSG_RX_STATUS,
		   "Failed to allocate new skb - dropping packet!\n");
		fp->eth_q_stats.rx_skb_alloc_failed++;
	}

	fp->tpa_state[queue] = BNX2X_TPA_STOP;
}
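/* TPA bin life cycle (summary of the two functions above): a bin goes
 * BNX2X_TPA_STOP -> BNX2X_TPA_START when the FW opens an aggregation
 * (bnx2x_tpa_start steals the ring skb into the pool) and returns to
 * BNX2X_TPA_STOP here, where the aggregated skb is either completed with
 * SGE pages and handed to GRO, or dropped on allocation failure.
 */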
/* Set Toeplitz hash value in the skb using the value from the
 * CQE (calculated by HW).
 */
static inline void bnx2x_set_skb_rxhash(struct bnx2x *bp, union eth_rx_cqe *cqe,
					struct sk_buff *skb)
{
	/* Set Toeplitz hash from CQE */
	if ((bp->dev->features & NETIF_F_RXHASH) &&
	    (cqe->fast_path_cqe.status_flags &
	     ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
		skb->rxhash =
			le32_to_cpu(cqe->fast_path_cqe.rss_hash_result);
}
int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
	   fp->index, hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		u8 cqe_fp_flags;
		u16 len, pad;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		/* Prefetch the page containing the BD descriptor
		   at producer's index. It will be needed when new skb is
		   allocated */
		prefetch((void *)(PAGE_ALIGN((unsigned long)
					     (&fp->rx_desc_ring[bd_prod])) -
				  PAGE_SIZE + 1));

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

		DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
		   " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
		   le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
		   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
		   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE(cqe_fp_flags))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;

		/* this is an rx packet */
		} else {
			rx_buf = &fp->rx_buf_ring[bd_cons];
			skb = rx_buf->skb;
			prefetch(skb);
			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
			pad = cqe->fast_path_cqe.placement_offset;

			/* If CQE is marked both TPA_START and TPA_END
			   it is a non-TPA CQE */
			if ((!fp->disable_tpa) &&
			    (TPA_TYPE(cqe_fp_flags) !=
					(TPA_TYPE_START | TPA_TYPE_END))) {
				u16 queue = cqe->fast_path_cqe.queue_index;

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_start on queue %d\n",
					   queue);

					bnx2x_tpa_start(fp, queue, skb,
							bd_cons, bd_prod);

					/* Set Toeplitz hash for an LRO skb */
					bnx2x_set_skb_rxhash(bp, cqe, skb);

					goto next_rx;
				}

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_stop on queue %d\n",
					   queue);

					if (!BNX2X_RX_SUM_FIX(cqe))
						BNX2X_ERR("STOP on non-TCP "
							  "data\n");

					/* This is a size of the linear data
					   on this skb */
					len = le16_to_cpu(cqe->fast_path_cqe.
								len_on_bd);
					bnx2x_tpa_stop(bp, fp, queue, pad,
						    len, cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
					if (bp->panic)
						return 0;
#endif

					bnx2x_update_sge_prod(fp,
							&cqe->fast_path_cqe);
					goto next_cqe;
				}
			}

			dma_sync_single_for_device(&bp->pdev->dev,
					dma_unmap_addr(rx_buf, mapping),
						   pad + RX_COPY_THRESH,
						   DMA_FROM_DEVICE);
			prefetch(((char *)(skb)) + 128);

			/* is this an error packet? */
			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR flags %x rx packet %u\n",
				   cqe_fp_flags, sw_comp_cons);
				fp->eth_q_stats.rx_err_discard_pkt++;
				goto reuse_rx;
			}

			/* Since we don't have a jumbo ring
			 * copy small packets if mtu > 1500
			 */
			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
			    (len <= RX_COPY_THRESH)) {
				struct sk_buff *new_skb;

				new_skb = netdev_alloc_skb(bp->dev,
							   len + pad);
				if (new_skb == NULL) {
					DP(NETIF_MSG_RX_ERR,
					   "ERROR packet dropped "
					   "because of alloc failure\n");
					fp->eth_q_stats.rx_skb_alloc_failed++;
					goto reuse_rx;
				}

				/* aligned copy */
				skb_copy_from_linear_data_offset(skb, pad,
						    new_skb->data + pad, len);
				skb_reserve(new_skb, pad);
				skb_put(new_skb, len);

				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

				skb = new_skb;

			} else
			if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
				dma_unmap_single(&bp->pdev->dev,
					dma_unmap_addr(rx_buf, mapping),
						 bp->rx_buf_size,
						 DMA_FROM_DEVICE);
				skb_reserve(skb, pad);
				skb_put(skb, len);

			} else {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR packet dropped because "
				   "of alloc failure\n");
				fp->eth_q_stats.rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
				goto next_rx;
			}

			skb->protocol = eth_type_trans(skb, bp->dev);

			/* Set Toeplitz hash for a non-LRO skb */
			bnx2x_set_skb_rxhash(bp, cqe, skb);

			skb_checksum_none_assert(skb);

			if (bp->rx_csum) {
				if (likely(BNX2X_RX_CSUM_OK(cqe)))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					fp->eth_q_stats.hw_csum_err++;
			}
		}

		skb_record_rx_queue(skb, fp->index);

#ifdef BCM_VLAN
		if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
		    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
		     PARSING_FLAGS_VLAN))
			vlan_gro_receive(&fp->napi, bp->vlgrp,
				le16_to_cpu(cqe->fast_path_cqe.vlan_tag), skb);
		else
#endif
			napi_gro_receive(&fp->napi, skb);

next_rx:
		rx_buf->skb = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}
static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB "
			 "[fp %d fw_sd %d igusb %d]\n",
	   fp->index, fp->fw_sb_id, fp->igu_sb_id);
	bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	/* Handle Rx and Tx according to MSI-X vector */
	prefetch(fp->rx_cons_sb);
	prefetch(fp->tx_cons_sb);
	prefetch(&fp->sb_running_index[SM_RX_ID]);
	napi_schedule(&bnx2x_fp(bp, fp->index, napi));

	return IRQ_HANDLED;
}
/* HW Lock for shared dual port PHYs */
void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
	mutex_lock(&bp->port.phy_mutex);

	if (bp->port.need_hw_lock)
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
}

void bnx2x_release_phy_lock(struct bnx2x *bp)
{
	if (bp->port.need_hw_lock)
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
}
void bnx2x_link_report(struct bnx2x *bp)
{
	if (bp->flags & MF_FUNC_DIS) {
		netif_carrier_off(bp->dev);
		netdev_err(bp->dev, "NIC Link is Down\n");
		return;
	}

	if (bp->link_vars.link_up) {
		u16 line_speed;

		if (bp->state == BNX2X_STATE_OPEN)
			netif_carrier_on(bp->dev);
		netdev_info(bp->dev, "NIC Link is Up, ");

		line_speed = bp->link_vars.line_speed;
		if (IS_E1HMF(bp)) {
			u16 vn_max_rate =
				((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
				 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
			if (vn_max_rate < line_speed)
				line_speed = vn_max_rate;
		}
		pr_cont("%d Mbps ", line_speed);

		if (bp->link_vars.duplex == DUPLEX_FULL)
			pr_cont("full duplex");
		else
			pr_cont("half duplex");

		if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
				pr_cont(", receive ");
				if (bp->link_vars.flow_ctrl &
				    BNX2X_FLOW_CTRL_TX)
					pr_cont("& transmit ");
			} else {
				pr_cont(", transmit ");
			}
			pr_cont("flow control ON");
		}
		pr_cont("\n");

	} else { /* link_down */
		netif_carrier_off(bp->dev);
		netdev_err(bp->dev, "NIC Link is Down\n");
	}
}
/* Returns the number of actually allocated BDs */
static inline int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
				     int rx_ring_size)
{
	struct bnx2x *bp = fp->bp;
	u16 ring_prod, cqe_ring_prod;
	int i;

	fp->rx_comp_cons = 0;
	cqe_ring_prod = ring_prod = 0;
	for (i = 0; i < rx_ring_size; i++) {
		if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
			BNX2X_ERR("was only able to allocate "
				  "%d rx skbs on queue[%d]\n", i, fp->index);
			fp->eth_q_stats.rx_skb_alloc_failed++;
			break;
		}
		ring_prod = NEXT_RX_IDX(ring_prod);
		cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
		WARN_ON(ring_prod <= i);
	}

	fp->rx_bd_prod = ring_prod;
	/* Limit the CQE producer by the CQE ring size */
	fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
				 cqe_ring_prod);
	fp->rx_pkt = fp->rx_calls = 0;

	return i;
}
static inline void bnx2x_alloc_rx_bd_ring(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;
	int rx_ring_size = bp->rx_ring_size ? bp->rx_ring_size :
					      MAX_RX_AVAIL/bp->num_queues;

	rx_ring_size = max_t(int, MIN_RX_AVAIL, rx_ring_size);

	bnx2x_alloc_rx_bds(fp, rx_ring_size);

	/* Warning!
	 * this will generate an interrupt (to the TSTORM)
	 * must only be done after chip is initialized
	 */
	bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
			     fp->rx_sge_prod);
}
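/* Sizing example for the helper above (illustrative): with no user
 * override of rx_ring_size and 4 RX queues, each queue gets
 * MAX_RX_AVAIL/4 buffers, clamped from below by MIN_RX_AVAIL so that a
 * large queue count cannot starve an individual ring.
 */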
void bnx2x_init_rx_rings(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
					      ETH_MAX_AGGREGATION_QUEUES_E1H;
	u16 ring_prod;
	int i, j;

	bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN +
		BNX2X_FW_IP_HDR_ALIGN_PAD;

	DP(NETIF_MSG_IFUP,
	   "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);

	for_each_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		if (!fp->disable_tpa) {
			for (i = 0; i < max_agg_queues; i++) {
				fp->tpa_pool[i].skb =
				   netdev_alloc_skb(bp->dev, bp->rx_buf_size);
				if (!fp->tpa_pool[i].skb) {
					BNX2X_ERR("Failed to allocate TPA "
						  "skb pool for queue[%d] - "
						  "disabling TPA on this "
						  "queue!\n", j);
					bnx2x_free_tpa_pool(bp, fp, i);
					fp->disable_tpa = 1;
					break;
				}
				dma_unmap_addr_set((struct sw_rx_bd *)
							&bp->fp->tpa_pool[i],
						   mapping, 0);
				fp->tpa_state[i] = BNX2X_TPA_STOP;
			}

			/* "next page" elements initialization */
			bnx2x_set_next_page_sgl(fp);

			/* set SGEs bit mask */
			bnx2x_init_sge_ring_bit_mask(fp);

			/* Allocate SGEs and initialize the ring elements */
			for (i = 0, ring_prod = 0;
			     i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {

				if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
					BNX2X_ERR("was only able to allocate "
						  "%d rx sges\n", i);
					BNX2X_ERR("disabling TPA for"
						  " queue[%d]\n", j);
					/* Cleanup already allocated elements */
					bnx2x_free_rx_sge_range(bp,
								fp, ring_prod);
					bnx2x_free_tpa_pool(bp,
							    fp, max_agg_queues);
					fp->disable_tpa = 1;
					ring_prod = 0;
					break;
				}
				ring_prod = NEXT_SGE_IDX(ring_prod);
			}

			fp->rx_sge_prod = ring_prod;
		}
	}

	for_each_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		fp->rx_bd_cons = 0;

		bnx2x_set_next_page_rx_bd(fp);

		/* CQ ring */
		bnx2x_set_next_page_rx_cq(fp);

		/* Allocate BDs and initialize BD ring */
		bnx2x_alloc_rx_bd_ring(fp);

		if (j != 0)
			continue;

		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
		       U64_LO(fp->rx_comp_mapping));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
		       U64_HI(fp->rx_comp_mapping));
	}
}
static void bnx2x_free_tx_skbs(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		u16 bd_cons = fp->tx_bd_cons;
		u16 sw_prod = fp->tx_pkt_prod;
		u16 sw_cons = fp->tx_pkt_cons;

		while (sw_cons != sw_prod) {
			bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
			sw_cons++;
		}
	}
}

static void bnx2x_free_rx_skbs(struct bnx2x *bp)
{
	int i, j;

	for_each_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		for (i = 0; i < NUM_RX_BD; i++) {
			struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
			struct sk_buff *skb = rx_buf->skb;

			if (skb == NULL)
				continue;

			dma_unmap_single(&bp->pdev->dev,
					 dma_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_size, DMA_FROM_DEVICE);

			rx_buf->skb = NULL;
			dev_kfree_skb(skb);
		}
		if (!fp->disable_tpa)
			bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
					    ETH_MAX_AGGREGATION_QUEUES_E1 :
					    ETH_MAX_AGGREGATION_QUEUES_E1H);
	}
}

void bnx2x_free_skbs(struct bnx2x *bp)
{
	bnx2x_free_tx_skbs(bp);
	bnx2x_free_rx_skbs(bp);
}
static void bnx2x_free_msix_irqs(struct bnx2x *bp)
{
	int i, offset = 1;

	free_irq(bp->msix_table[0].vector, bp->dev);
	DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
	   bp->msix_table[0].vector);

#ifdef BCM_CNIC
	offset++;
#endif
	for_each_queue(bp, i) {
		DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
		   "state %x\n", i, bp->msix_table[i + offset].vector,
		   bnx2x_fp(bp, i, state));

		free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
	}
}

void bnx2x_free_irq(struct bnx2x *bp, bool disable_only)
{
	if (bp->flags & USING_MSIX_FLAG) {
		if (!disable_only)
			bnx2x_free_msix_irqs(bp);
		pci_disable_msix(bp->pdev);
		bp->flags &= ~USING_MSIX_FLAG;

	} else if (bp->flags & USING_MSI_FLAG) {
		if (!disable_only)
			free_irq(bp->pdev->irq, bp->dev);
		pci_disable_msi(bp->pdev);
		bp->flags &= ~USING_MSI_FLAG;

	} else if (!disable_only)
		free_irq(bp->pdev->irq, bp->dev);
}
static int bnx2x_enable_msix(struct bnx2x *bp)
{
	int i, rc, offset = 1;
	int igu_vec = 0;

	bp->msix_table[0].entry = igu_vec;
	DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);

#ifdef BCM_CNIC
	igu_vec = BP_L_ID(bp) + offset;
	bp->msix_table[1].entry = igu_vec;
	DP(NETIF_MSG_IFUP, "msix_table[1].entry = %d (CNIC)\n", igu_vec);
	offset++;
#endif
	for_each_queue(bp, i) {
		igu_vec = BP_L_ID(bp) + offset + i;
		bp->msix_table[i + offset].entry = igu_vec;
		DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
		   "(fastpath #%u)\n", i + offset, igu_vec, i);
	}

	rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
			     BNX2X_NUM_QUEUES(bp) + offset);

	/*
	 * reconfigure number of tx/rx queues according to available
	 * MSI-X vectors
	 */
	if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
		/* vectors available for FP */
		int fp_vec = rc - BNX2X_MSIX_VEC_FP_START;

		DP(NETIF_MSG_IFUP,
		   "Trying to use less MSI-X vectors: %d\n", rc);

		rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);

		if (rc) {
			DP(NETIF_MSG_IFUP,
			   "MSI-X is not attainable rc %d\n", rc);
			return rc;
		}

		bp->num_queues = min(bp->num_queues, fp_vec);

		DP(NETIF_MSG_IFUP, "New queue configuration set: %d\n",
		   bp->num_queues);
	} else if (rc) {
		DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
		return rc;
	}

	bp->flags |= USING_MSIX_FLAG;

	return 0;
}
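/* Vector layout assumed by the function above (illustrative, non-CNIC
 * build):
 *   msix_table[0]    - slowpath interrupt
 *   msix_table[1..n] - one vector per fastpath queue
 * With BCM_CNIC the CNIC vector sits at index 1 and the fastpath block
 * shifts up by one, which is what "offset" tracks.
 */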
static int bnx2x_req_msix_irqs(struct bnx2x *bp)
{
	int i, rc, offset = 1;

	rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
			 bp->dev->name, bp->dev);
	if (rc) {
		BNX2X_ERR("request sp irq failed\n");
		return -EBUSY;
	}

#ifdef BCM_CNIC
	offset++;
#endif
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
			 bp->dev->name, i);

		rc = request_irq(bp->msix_table[i + offset].vector,
				 bnx2x_msix_fp_int, 0, fp->name, fp);
		if (rc) {
			BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
			bnx2x_free_msix_irqs(bp);
			return -EBUSY;
		}

		fp->state = BNX2X_FP_STATE_IRQ;
	}

	i = BNX2X_NUM_QUEUES(bp);
	netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d"
		    " ... fp[%d] %d\n",
		    bp->msix_table[0].vector,
		    0, bp->msix_table[offset].vector,
		    i - 1, bp->msix_table[offset + i - 1].vector);

	return 0;
}

static int bnx2x_enable_msi(struct bnx2x *bp)
{
	int rc;

	rc = pci_enable_msi(bp->pdev);
	if (rc) {
		DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
		return -1;
	}
	bp->flags |= USING_MSI_FLAG;

	return 0;
}
static int bnx2x_req_irq(struct bnx2x *bp)
{
	unsigned long flags;
	int rc;

	if (bp->flags & USING_MSI_FLAG)
		flags = 0;
	else
		flags = IRQF_SHARED;

	rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
			 bp->dev->name, bp->dev);
	if (!rc)
		bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;

	return rc;
}
static void bnx2x_napi_enable(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i)
		napi_enable(&bnx2x_fp(bp, i, napi));
}

static void bnx2x_napi_disable(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i)
		napi_disable(&bnx2x_fp(bp, i, napi));
}

void bnx2x_netif_start(struct bnx2x *bp)
{
	int intr_sem;

	intr_sem = atomic_dec_and_test(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	if (intr_sem) {
		if (netif_running(bp->dev)) {
			bnx2x_napi_enable(bp);
			bnx2x_int_enable(bp);
			if (bp->state == BNX2X_STATE_OPEN)
				netif_tx_wake_all_queues(bp->dev);
		}
	}
}

void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
{
	bnx2x_int_disable_sync(bp, disable_hw);
	bnx2x_napi_disable(bp);
	netif_tx_disable(bp->dev);
}
static int bnx2x_set_num_queues(struct bnx2x *bp)
{
	int rc = 0;

	switch (bp->int_mode) {
	case INT_MODE_MSI:
		bnx2x_enable_msi(bp);
		/* falling through... */
	case INT_MODE_INTx:
		bp->num_queues = 1;
		DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
		break;
	default:
		/* Set number of queues according to bp->multi_mode value */
		bnx2x_set_num_queues_msix(bp);

		DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
		   bp->num_queues);

		/* if we can't use MSI-X we only need one fp,
		 * so try to enable MSI-X with the requested number of fp's
		 * and fallback to MSI or legacy INTx with one fp
		 */
		rc = bnx2x_enable_msix(bp);
		if (rc) {
			/* failed to enable MSI-X */
			bp->num_queues = 1;

			/* Fall to INTx if failed to enable MSI-X due to lack
			 * of memory (in bnx2x_set_num_queues()) */
			if ((rc != -ENOMEM) && (bp->int_mode != INT_MODE_INTx))
				bnx2x_enable_msi(bp);
		}
		break;
	}
	netif_set_real_num_tx_queues(bp->dev, bp->num_queues);
	return netif_set_real_num_rx_queues(bp->dev, bp->num_queues);
}
static void bnx2x_release_firmware(struct bnx2x *bp)
{
	kfree(bp->init_ops_offsets);
	kfree(bp->init_ops);
	kfree(bp->init_data);
	release_firmware(bp->firmware);
}
/* must be called with rtnl_lock */
int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
{
	u32 load_code;
	int i, rc;

	/* Set init arrays */
	rc = bnx2x_init_firmware(bp);
	if (rc) {
		BNX2X_ERR("Error loading firmware\n");
		return rc;
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EPERM;
#endif

	bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;

	rc = bnx2x_set_num_queues(bp);
	if (rc)
		return rc;

	/* must be called before memory allocation and HW init */
	bnx2x_ilt_set_info(bp);

	if (bnx2x_alloc_mem(bp)) {
		bnx2x_free_irq(bp, true);
		return -ENOMEM;
	}

	for_each_queue(bp, i)
		bnx2x_fp(bp, i, disable_tpa) =
					((bp->flags & TPA_ENABLE_FLAG) == 0);

	for_each_queue(bp, i)
		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
			       bnx2x_poll, 128);

	bnx2x_napi_enable(bp);

	if (bp->flags & USING_MSIX_FLAG) {
		rc = bnx2x_req_msix_irqs(bp);
		if (rc) {
			bnx2x_free_irq(bp, true);
			goto load_error1;
		}
	} else {
		rc = bnx2x_req_irq(bp);
		if (rc) {
			BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
			bnx2x_free_irq(bp, true);
			goto load_error1;
		}
		if (bp->flags & USING_MSI_FLAG) {
			bp->dev->irq = bp->pdev->irq;
			netdev_info(bp->dev, "using MSI IRQ %d\n",
				    bp->pdev->irq);
		}
	}

	/* Send LOAD_REQUEST command to MCP
	   Returns the type of LOAD command:
	   if it is the first port to be initialized
	   common blocks should be initialized, otherwise - not
	*/
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			goto load_error2;
		}
		if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
			rc = -EBUSY; /* other port in diagnostic mode */
			goto load_error2;
		}

	} else {
		int port = BP_PORT(bp);

		DP(NETIF_MSG_IFUP, "NO MCP - load counts %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		load_count[0]++;
		load_count[1 + port]++;
		DP(NETIF_MSG_IFUP, "NO MCP - new load counts %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		if (load_count[0] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
		else if (load_count[1 + port] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_PORT;
		else
			load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
	}

	if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
	    (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
		bp->port.pmf = 1;
	else
		bp->port.pmf = 0;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* Initialize HW */
	rc = bnx2x_init_hw(bp, load_code);
	if (rc) {
		BNX2X_ERR("HW init failed, aborting\n");
		bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
		goto load_error2;
	}

	bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);

	/* Setup NIC internals and enable interrupts */
	bnx2x_nic_init(bp, load_code);

	if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) &&
	    (bp->common.shmem2_base))
		SHMEM2_WR(bp, dcc_support,
			  (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
			   SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));

	/* Send LOAD_DONE command to MCP */
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			goto load_error3;
		}
	}

	bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;

	rc = bnx2x_func_start(bp);
	if (rc) {
		BNX2X_ERR("Function start failed!\n");
#ifndef BNX2X_STOP_ON_ERROR
		goto load_error3;
#else
		bp->panic = 1;
		return -EBUSY;
#endif
	}

	rc = bnx2x_setup_client(bp, &bp->fp[0], 1 /* Leading */);
	if (rc) {
		BNX2X_ERR("Setup leading failed!\n");
#ifndef BNX2X_STOP_ON_ERROR
		goto load_error3;
#else
		bp->panic = 1;
		return -EBUSY;
#endif
	}

	if (CHIP_IS_E1H(bp))
		if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
			DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
			bp->flags |= MF_FUNC_DIS;
		}

#ifdef BCM_CNIC
	/* Enable Timer scan */
	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
#endif

	for_each_nondefault_queue(bp, i) {
		rc = bnx2x_setup_client(bp, &bp->fp[i], 0);
		if (rc)
#ifdef BCM_CNIC
			goto load_error4;
#else
			goto load_error3;
#endif
	}

	/* Now when Clients are configured we are ready to work */
	bp->state = BNX2X_STATE_OPEN;

	bnx2x_set_eth_mac(bp, 1);

#ifdef BCM_CNIC
	/* Set iSCSI L2 MAC */
	mutex_lock(&bp->cnic_mutex);
	if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD) {
		bnx2x_set_iscsi_eth_mac_addr(bp, 1);
		bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
		bnx2x_init_sb(bp, bp->cnic_sb_mapping,
			      BNX2X_VF_ID_INVALID, false,
			      CNIC_SB_ID(bp), CNIC_IGU_SB_ID(bp));
	}
	mutex_unlock(&bp->cnic_mutex);
#endif

	if (bp->port.pmf)
		bnx2x_initial_phy_init(bp, load_mode);

	/* Start fast path */
	switch (load_mode) {
	case LOAD_NORMAL:
		/* Tx queue should be only reenabled */
		netif_tx_wake_all_queues(bp->dev);
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		break;

	case LOAD_OPEN:
		netif_tx_start_all_queues(bp->dev);
		smp_mb__after_clear_bit();
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		break;

	case LOAD_DIAG:
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		bp->state = BNX2X_STATE_DIAG;
		break;

	default:
		break;
	}

	if (!bp->port.pmf)
		bnx2x__link_status_update(bp);

	/* start the timer */
	mod_timer(&bp->timer, jiffies + bp->current_interval);

#ifdef BCM_CNIC
	bnx2x_setup_cnic_irq_info(bp);
	if (bp->state == BNX2X_STATE_OPEN)
		bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
#endif
	bnx2x_inc_load_cnt(bp);

	bnx2x_release_firmware(bp);

	return 0;

#ifdef BCM_CNIC
load_error4:
	/* Disable Timer scan */
	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0);
#endif
load_error3:
	bnx2x_int_disable_sync(bp, 1);
	if (!BP_NOMCP(bp)) {
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
	}
	bp->port.pmf = 0;
	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
load_error2:
	/* Release IRQs */
	bnx2x_free_irq(bp, false);
load_error1:
	bnx2x_napi_disable(bp);
	for_each_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bnx2x_release_firmware(bp);

	return rc;
}
/* must be called with rtnl_lock */
int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
{
	int i;

	if (bp->state == BNX2X_STATE_CLOSED) {
		/* Interface has been removed - nothing to recover */
		bp->recovery_state = BNX2X_RECOVERY_DONE;
		bp->is_leader = 0;
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
		smp_wmb();

		return -EINVAL;
	}

#ifdef BCM_CNIC
	bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
#endif
	bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;

	/* Set "drop all" */
	bp->rx_mode = BNX2X_RX_MODE_NONE;
	bnx2x_set_storm_rx_mode(bp);

	del_timer_sync(&bp->timer);
	SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
		 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	/* Cleanup the chip if needed */
	if (unload_mode != UNLOAD_RECOVERY)
		bnx2x_chip_cleanup(bp, unload_mode);
	else {
		/* Disable HW interrupts, NAPI and Tx */
		bnx2x_netif_stop(bp, 1);

		/* Release IRQs */
		bnx2x_free_irq(bp, false);
	}

	bp->port.pmf = 0;

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	/* The last driver must disable a "close the gate" if there is no
	 * parity attention or "process kill" pending.
	 */
	if ((!bnx2x_dec_load_cnt(bp)) && (!bnx2x_chk_parity_attn(bp)) &&
	    bnx2x_reset_is_done(bp))
		bnx2x_disable_close_the_gate(bp);

	/* Reset MCP mail box sequence if there is on going recovery */
	if (unload_mode == UNLOAD_RECOVERY)
		bp->fw_seq = 0;

	return 0;
}
int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0:
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
				       PCI_PM_CTRL_PME_STATUS));

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);
		break;

	case PCI_D3hot:
		/* If there are other clients above don't
		   shut down the power */
		if (atomic_read(&bp->pdev->enable_cnt) != 1)
			return 0;
		/* Don't shut down the power for emulation and FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			return 0;

		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= 3;

		if (bp->wol)
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;

		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		break;

	default:
		return -EINVAL;
	}
	return 0;
}
/*
 * net_device service functions
 */
static int bnx2x_poll(struct napi_struct *napi, int budget)
{
	int work_done = 0;
	struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
						 napi);
	struct bnx2x *bp = fp->bp;

	while (1) {
#ifdef BNX2X_STOP_ON_ERROR
		if (unlikely(bp->panic)) {
			napi_complete(napi);
			return 0;
		}
#endif

		if (bnx2x_has_tx_work(fp))
			bnx2x_tx_int(fp);

		if (bnx2x_has_rx_work(fp)) {
			work_done += bnx2x_rx_int(fp, budget - work_done);

			/* must not complete if we consumed full budget */
			if (work_done >= budget)
				break;
		}

		/* Fall out from the NAPI loop if needed */
		if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
			bnx2x_update_fpsb_idx(fp);
			/* bnx2x_has_rx_work() reads the status block,
			 * thus we need to ensure that status block indices
			 * have been actually read (bnx2x_update_fpsb_idx)
			 * prior to this check (bnx2x_has_rx_work) so that
			 * we won't write the "newer" value of the status block
			 * to IGU (if there was a DMA right after
			 * bnx2x_has_rx_work and if there is no rmb, the memory
			 * reading (bnx2x_update_fpsb_idx) may be postponed
			 * to right before bnx2x_ack_sb). In this case there
			 * will never be another interrupt until there is
			 * another update of the status block, while there
			 * is still unhandled work.
			 */
			rmb();

			if (!(bnx2x_has_rx_work(fp) ||
			      bnx2x_has_tx_work(fp))) {
				napi_complete(napi);
				/* Re-enable interrupts */
				DP(NETIF_MSG_HW,
				   "Update index to %d\n", fp->fp_hc_idx);
				bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
					     le16_to_cpu(fp->fp_hc_idx),
					     IGU_INT_ENABLE, 1);
				break;
			}
		}
	}

	return work_done;
}
/* we split the first BD into headers and data BDs
 * to ease the pain of our fellow microcode engineers
 * we use one mapping for both BDs
 * So far this has only been observed to happen
 * in Other Operating Systems(TM)
 */
static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
				   struct bnx2x_fastpath *fp,
				   struct sw_tx_bd *tx_buf,
				   struct eth_tx_start_bd **tx_bd, u16 hlen,
				   u16 bd_prod, int nbd)
{
	struct eth_tx_start_bd *h_tx_bd = *tx_bd;
	struct eth_tx_bd *d_tx_bd;
	dma_addr_t mapping;
	int old_len = le16_to_cpu(h_tx_bd->nbytes);

	/* first fix first BD */
	h_tx_bd->nbd = cpu_to_le16(nbd);
	h_tx_bd->nbytes = cpu_to_le16(hlen);

	DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
	   "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
	   h_tx_bd->addr_lo, h_tx_bd->nbd);

	/* now get a new data BD
	 * (after the pbd) and fill it */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;

	mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
			   le32_to_cpu(h_tx_bd->addr_lo)) + hlen;

	d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);

	/* this marks the BD as one that has no individual mapping */
	tx_buf->flags |= BNX2X_TSO_SPLIT_BD;

	DP(NETIF_MSG_TX_QUEUED,
	   "TSO split data size is %d (%x:%x)\n",
	   d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);

	/* update tx_bd */
	*tx_bd = (struct eth_tx_start_bd *)d_tx_bd;

	return bd_prod;
}
static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
{
	if (fix > 0)
		csum = (u16) ~csum_fold(csum_sub(csum,
				csum_partial(t_header - fix, fix, 0)));

	else if (fix < 0)
		csum = (u16) ~csum_fold(csum_add(csum,
				csum_partial(t_header, -fix, 0)));

	return swab16(csum);
}
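/* Worked example (illustrative): if the stack started its partial checksum
 * "fix" bytes before the real transport header, the bytes in
 * [t_header - fix, t_header) were summed in error; csum_sub() removes
 * their partial sum, csum_fold() collapses the result to 16 bits, and
 * swab16() converts it to the byte order the FW expects in the parse BD.
 * The fix < 0 branch is the symmetric case where bytes were missed.
 */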
static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
{
	u32 rc;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		rc = XMIT_PLAIN;

	else {
		if (skb->protocol == htons(ETH_P_IPV6)) {
			rc = XMIT_CSUM_V6;
			if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
				rc |= XMIT_CSUM_TCP;

		} else {
			rc = XMIT_CSUM_V4;
			if (ip_hdr(skb)->protocol == IPPROTO_TCP)
				rc |= XMIT_CSUM_TCP;
		}
	}

	if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
		rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP);

	else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
		rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6);

	return rc;
}
#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
/* check if packet requires linearization (packet is too fragmented)
   no need to check fragmentation if page size > 8K (there will be no
   violation to FW restrictions) */
static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
			     u32 xmit_type)
{
	int to_copy = 0;
	int hlen = 0;
	int first_bd_sz = 0;

	/* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
	if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {

		if (xmit_type & XMIT_GSO) {
			unsigned short lso_mss = skb_shinfo(skb)->gso_size;
			/* Check if LSO packet needs to be copied:
			   3 = 1 (for headers BD) + 2 (for PBD and last BD) */
			int wnd_size = MAX_FETCH_BD - 3;
			/* Number of windows to check */
			int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
			int wnd_idx = 0;
			int frag_idx = 0;
			u32 wnd_sum = 0;

			/* Headers length */
			hlen = (int)(skb_transport_header(skb) - skb->data) +
				tcp_hdrlen(skb);

			/* Amount of data (w/o headers) on linear part of SKB*/
			first_bd_sz = skb_headlen(skb) - hlen;

			wnd_sum = first_bd_sz;

			/* Calculate the first sum - it's special */
			for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
				wnd_sum +=
					skb_shinfo(skb)->frags[frag_idx].size;

			/* If there was data on linear skb data - check it */
			if (first_bd_sz > 0) {
				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					goto exit_lbl;
				}

				wnd_sum -= first_bd_sz;
			}

			/* Others are easier: run through the frag list and
			   check all windows */
			for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
				wnd_sum +=
			  skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;

				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					break;
				}
				wnd_sum -=
					skb_shinfo(skb)->frags[wnd_idx].size;
			}
		} else {
			/* in non-LSO too fragmented packet should always
			   be linearized */
			to_copy = 1;
		}
	}

exit_lbl:
	if (unlikely(to_copy))
		DP(NETIF_MSG_TX_QUEUED,
		   "Linearization IS REQUIRED for %s packet. "
		   "num_frags %d hlen %d first_bd_sz %d\n",
		   (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
		   skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);

	return to_copy;
}
#endif
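/* Window check intuition (illustrative): the FW can fetch only a limited
 * number of BDs per packet, so every sliding window of wnd_size frags must
 * carry at least one full MSS of payload; a window summing to less than
 * lso_mss would let a single segment span too many BDs, hence the
 * skb_linearize() fallback in bnx2x_start_xmit().
 */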
/* called with netif_tx_lock
 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue()
 */
netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct bnx2x_fastpath *fp;
	struct netdev_queue *txq;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
	struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
	u16 pkt_prod, bd_prod;
	int nbd, fp_index;
	dma_addr_t mapping;
	u32 xmit_type = bnx2x_xmit_type(bp, skb);
	int i;
	u8 hlen = 0;
	__le16 pkt_size = 0;
	struct ethhdr *eth;
	u8 mac_type = UNICAST_ADDRESS;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return NETDEV_TX_BUSY;
#endif

	fp_index = skb_get_queue_mapping(skb);
	txq = netdev_get_tx_queue(dev, fp_index);

	fp = &bp->fp[fp_index];

	if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
		fp->eth_q_stats.driver_xoff++;
		netif_tx_stop_queue(txq);
		BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
		return NETDEV_TX_BUSY;
	}

	DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
	   " gso type %x xmit_type %x\n",
	   skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
	   ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);

	eth = (struct ethhdr *)skb->data;

	/* set flag according to packet type (UNICAST_ADDRESS is default)*/
	if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
		if (is_broadcast_ether_addr(eth->h_dest))
			mac_type = BROADCAST_ADDRESS;
		else
			mac_type = MULTICAST_ADDRESS;
	}

#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
	/* First, check if we need to linearize the skb (due to FW
	   restrictions). No need to check fragmentation if page size > 8K
	   (there will be no violation to FW restrictions) */
	if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
		/* Statistics of linearization */
		bp->lin_cnt++;
		if (skb_linearize(skb) != 0) {
			DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
			   "silently dropping this SKB\n");
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
	}
#endif

	/*
	Please read carefully. First we use one BD which we mark as start,
	then we have a parsing info BD (used for TSO or xsum),
	and only then we have the rest of the TSO BDs.
	(don't forget to mark the last one as last,
	and to unmap only AFTER you write to the BD ...)
	And above all, all PBD sizes are in words - NOT DWORDS!
	*/

	pkt_prod = fp->tx_pkt_prod++;
	bd_prod = TX_BD(fp->tx_bd_prod);

	/* get a tx_buf and first BD */
	tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
	tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;

	tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
	SET_FLAG(tx_start_bd->general_data,
		 ETH_TX_START_BD_ETH_ADDR_TYPE,
		 mac_type);
	/* header nbd */
	SET_FLAG(tx_start_bd->general_data,
		 ETH_TX_START_BD_HDR_NBDS,
		 1);

	/* remember the first BD of the packet */
	tx_buf->first_bd = fp->tx_bd_prod;
	tx_buf->skb = skb;
	tx_buf->flags = 0;

	DP(NETIF_MSG_TX_QUEUED,
	   "sending pkt %u @%p next_idx %u bd %u @%p\n",
	   pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);

#ifdef BCM_VLAN
	if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
	    (bp->flags & HW_VLAN_TX_FLAG)) {
		tx_start_bd->vlan_or_ethertype =
		    cpu_to_le16(vlan_tx_tag_get(skb));
		tx_start_bd->bd_flags.as_bitfield |=
		    (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
	} else
#endif
		tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);

	/* turn on parsing and get a BD */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));

	if (xmit_type & XMIT_CSUM) {
		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;

		if (xmit_type & XMIT_CSUM_V4)
			tx_start_bd->bd_flags.as_bitfield |=
						ETH_TX_BD_FLAGS_IP_CSUM;
		else
			tx_start_bd->bd_flags.as_bitfield |=
						ETH_TX_BD_FLAGS_IPV6;

		if (!(xmit_type & XMIT_CSUM_TCP))
			tx_start_bd->bd_flags.as_bitfield |=
						ETH_TX_BD_FLAGS_IS_UDP;
	}

	pbd_e1x = &fp->tx_desc_ring[bd_prod].parse_bd_e1x;
	memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
	/* Set PBD in checksum offload case */
	if (xmit_type & XMIT_CSUM) {
		hlen = (skb_network_header(skb) - skb->data) / 2;

		/* for now NS flag is not used in Linux */
		pbd_e1x->global_data =
			(hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
			 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));

		pbd_e1x->ip_hlen_w = (skb_transport_header(skb) -
				      skb_network_header(skb)) / 2;

		hlen += pbd_e1x->ip_hlen_w + tcp_hdrlen(skb) / 2;

		pbd_e1x->total_hlen_w = cpu_to_le16(hlen);
		hlen = hlen*2;

		if (xmit_type & XMIT_CSUM_TCP) {
			pbd_e1x->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);

		} else {
			s8 fix = SKB_CS_OFF(skb); /* signed! */

			DP(NETIF_MSG_TX_QUEUED,
			   "hlen %d fix %d csum before fix %x\n",
			   le16_to_cpu(pbd_e1x->total_hlen_w),
			   fix, SKB_CS(skb));

			/* HW bug: fixup the CSUM */
			pbd_e1x->tcp_pseudo_csum =
				bnx2x_csum_fix(skb_transport_header(skb),
					       SKB_CS(skb), fix);

			DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
			   pbd_e1x->tcp_pseudo_csum);
		}
	}

	mapping = dma_map_single(&bp->pdev->dev, skb->data,
				 skb_headlen(skb), DMA_TO_DEVICE);

	tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
	tx_start_bd->nbd = cpu_to_le16(nbd);
	tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
	pkt_size = tx_start_bd->nbytes;

	DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
	   " nbytes %d flags %x vlan %x\n",
	   tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
	   le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
	   tx_start_bd->bd_flags.as_bitfield,
	   le16_to_cpu(tx_start_bd->vlan_or_ethertype));

	if (xmit_type & XMIT_GSO) {

		DP(NETIF_MSG_TX_QUEUED,
		   "TSO packet len %d hlen %d total len %d tso size %d\n",
		   skb->len, hlen, skb_headlen(skb),
		   skb_shinfo(skb)->gso_size);

		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;

		if (unlikely(skb_headlen(skb) > hlen))
			bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
						 hlen, bd_prod, ++nbd);

		pbd_e1x->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
		pbd_e1x->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
		pbd_e1x->tcp_flags = pbd_tcp_flags(skb);

		if (xmit_type & XMIT_GSO_V4) {
			pbd_e1x->ip_id = swab16(ip_hdr(skb)->id);
			pbd_e1x->tcp_pseudo_csum =
				swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
							  ip_hdr(skb)->daddr,
							  0, IPPROTO_TCP, 0));

		} else
			pbd_e1x->tcp_pseudo_csum =
				swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
							&ipv6_hdr(skb)->daddr,
							0, IPPROTO_TCP, 0));

		pbd_e1x->global_data |=
				ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN;
	}
	tx_data_bd = (struct eth_tx_bd *)tx_start_bd;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
		tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
		if (total_pkt_bd == NULL)
			total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;

		mapping = dma_map_page(&bp->pdev->dev, frag->page,
				       frag->page_offset,
				       frag->size, DMA_TO_DEVICE);

		tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
		tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
		tx_data_bd->nbytes = cpu_to_le16(frag->size);
		le16_add_cpu(&pkt_size, frag->size);

		DP(NETIF_MSG_TX_QUEUED,
		   "frag %d bd @%p addr (%x:%x) nbytes %d\n",
		   i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
		   le16_to_cpu(tx_data_bd->nbytes));
	}

	DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);

	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));

	/* now send a tx doorbell, counting the next BD
	 * if the packet contains or ends with it
	 */
	if (TX_BD_POFF(bd_prod) < nbd)
		nbd++;

	if (total_pkt_bd != NULL)
		total_pkt_bd->total_pkt_bytes = pkt_size;

	if (pbd_e1x)
		DP(NETIF_MSG_TX_QUEUED,
		   "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
		   " tcp_flags %x xsum %x seq %u hlen %u\n",
		   pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
		   pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
		   pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
		   le16_to_cpu(pbd_e1x->total_hlen_w));

	DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);

	/*
	 * Make sure that the BD data is updated before updating the producer
	 * since FW might read the BD right after the producer is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since FW will
	 * assumes packets must have BDs.
	 */
	wmb();

	fp->tx_db.data.prod += nbd;
	barrier();
	DOORBELL(bp, fp->cid, fp->tx_db.raw);

	mmiowb();

	fp->tx_bd_prod += nbd;

	if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
		netif_tx_stop_queue(txq);

		/* paired memory barrier is in bnx2x_tx_int(), we have to keep
		 * ordering of set_bit() in netif_tx_stop_queue() and read of
		 * fp->bd_tx_cons */
		smp_mb();

		fp->eth_q_stats.driver_xoff++;
		if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
			netif_tx_wake_queue(txq);
	}
	fp->tx_pkt++;

	return NETDEV_TX_OK;
}
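/* Doorbell accounting note for the function above (illustrative): nbd
 * counts the start BD, the parse BD and one BD per frag (plus the TSO
 * split BD when used); the extra increment when TX_BD_POFF(bd_prod) < nbd
 * accounts for the "next page" BD the chain crossed, so the producer
 * update always covers the whole packet.
 */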
/* called with rtnl_lock */
int bnx2x_change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct bnx2x *bp = netdev_priv(dev);

	if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	if (netif_running(dev))
		bnx2x_set_eth_mac(bp, 1);

	return 0;
}
void bnx2x_free_mem_bp(struct bnx2x *bp)
{
	kfree(bp->fp);
	kfree(bp->msix_table);
	kfree(bp->ilt);
}

int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
{
	struct bnx2x_fastpath *fp;
	struct msix_entry *tbl;
	struct bnx2x_ilt *ilt;

	/* fp array */
	fp = kzalloc(L2_FP_COUNT(bp->l2_cid_count)*sizeof(*fp), GFP_KERNEL);
	if (!fp)
		goto alloc_err;
	bp->fp = fp;

	/* msix table */
	tbl = kzalloc((bp->l2_cid_count + 1) * sizeof(*tbl),
		      GFP_KERNEL);
	if (!tbl)
		goto alloc_err;
	bp->msix_table = tbl;

	/* ilt */
	ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
	if (!ilt)
		goto alloc_err;
	bp->ilt = ilt;

	return 0;
alloc_err:
	bnx2x_free_mem_bp(bp);
	return -ENOMEM;
}
/* called with rtnl_lock */
int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		printk(KERN_ERR "Handling parity error recovery. Try again later\n");
		return -EAGAIN;
	}

	if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
	    ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
		return -EINVAL;

	/* This does not race with packet allocation
	 * because the actual alloc size is
	 * only updated as part of load
	 */
	dev->mtu = new_mtu;

	if (netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}
void bnx2x_tx_timeout(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

#ifdef BNX2X_STOP_ON_ERROR
	if (!bp->panic)
		bnx2x_panic();
#endif
	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_delayed_work(&bp->reset_task, 0);
}
#ifdef BCM_VLAN
/* called with rtnl_lock */
void bnx2x_vlan_rx_register(struct net_device *dev,
			    struct vlan_group *vlgrp)
{
	struct bnx2x *bp = netdev_priv(dev);

	bp->vlgrp = vlgrp;
}
#endif
int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_save_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	netif_device_detach(dev);

	bnx2x_nic_unload(bp, UNLOAD_CLOSE);

	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

	rtnl_unlock();

	return 0;
}

int bnx2x_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;
	int rc;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		printk(KERN_ERR "Handling parity error recovery. Try again later\n");
		return -EAGAIN;
	}

	rtnl_lock();

	pci_restore_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);

	rc = bnx2x_nic_load(bp, LOAD_OPEN);

	rtnl_unlock();

	return rc;
}