/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2008 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#ifdef NETIF_F_HW_VLAN_TX
	#include <linux/if_vlan.h>
#endif
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
55 #include "bnx2x_reg.h"
56 #include "bnx2x_fw_defs.h"
57 #include "bnx2x_hsi.h"
58 #include "bnx2x_link.h"
60 #include "bnx2x_init.h"

#define DRV_MODULE_VERSION	"1.45.23"
#define DRV_MODULE_RELDATE	"2008/11/03"
#define BNX2X_BC_VER		0x040200

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int disable_tpa;
static int use_inta;
static int poll;
static int debug;
static int load_count[3]; /* 0-common, 1-port0, 2-port1 */

module_param(disable_tpa, int, 0);
module_param(use_inta, int, 0);
module_param(poll, int, 0);
module_param(debug, int, 0);
MODULE_PARM_DESC(disable_tpa, "disable the TPA (LRO) feature");
MODULE_PARM_DESC(use_inta, "use INT#A instead of MSI-X");
MODULE_PARM_DESC(poll, "use polling (for debug)");
MODULE_PARM_DESC(debug, "default debug msglevel");

#ifdef BNX2X_MULTI_QUEUE	/* not ready yet */
static int use_multi;

module_param(use_multi, int, 0);
MODULE_PARM_DESC(use_multi, "use per-CPU queues");
#endif

static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711 = 1,
	BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};

static const struct pci_device_id bnx2x_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}
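
/* Note: the two helpers above tunnel GRC register accesses through the PCI
 * config-space window instead of the memory-mapped BAR - the first config
 * write selects the target GRC address, PCICFG_GRC_DATA moves the data, and
 * the final write re-points the window at PCICFG_VENDOR_ID_OFFSET, presumably
 * so that a later stray config access cannot hit the last-used register.
 * This slow path is used only while the DMAE engine is not yet ready
 * (see the bp->dmae_ready checks below).
 */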

static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			    int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}
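
/* Note: the command is copied dword-by-dword because the command memory
 * behind DMAE_REG_CMD_MEM is device register space, not host memory, so a
 * plain memcpy() would bypass REG_WR(). Writing 1 to the per-channel "go"
 * register then kicks off the transfer that the command describes.
 */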

void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   " using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(dma_addr);
	dmae->src_addr_hi = U64_HI(dma_addr);
	dmae->dst_addr_lo = dst_addr >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("dmae timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}

	mutex_unlock(&bp->dmae_mutex);
}
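
/* Usage sketch (an assumption, based on how bnx2x_wb_wr() below uses it):
 * callers normally go through the REG_WR_DMAE() wrapper, which stages the
 * dwords in the pre-mapped slowpath write-back buffer and then invokes
 * bnx2x_write_dmae(), roughly:
 *
 *	memcpy(bnx2x_sp(bp, wb_data[0]), valp, len32 * 4);
 *	bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data), offset, len32);
 *
 * so the DMA source is always the slowpath area and no per-call mapping
 * is needed. Completion is detected by polling wb_comp, which the DMAE
 * engine writes with DMAE_COMP_VAL when the transfer finishes.
 */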

void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   " using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = src_addr >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {

		if (!cnt) {
			BNX2X_ERR("dmae timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
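
/* Note: "wide bus" registers are 64 bits and must be accessed as a
 * consistent hi/lo pair, which two independent 32-bit REG_RD/REG_WR
 * accesses cannot guarantee; routing the access through one 2-dword DMAE
 * transaction keeps both halves coherent, and HILO_U64() reassembles them
 * with element 0 holding the high dword, matching bnx2x_wb_wr() above.
 */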

static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}
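
/* Note: each STORM processor keeps a list of assert entries of four dwords
 * each; the list-index register says whether any fired, and the scan stops
 * at the first entry whose opcode reads COMMON_ASM_INVALID_ASSERT_OPCODE.
 * The return value is the total number of asserts found across all STORMs.
 */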

static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 mark, offset;
	u32 data[9];
	int word;

	mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
	mark = ((mark + 0x3) & ~0x3);
	printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);

	for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	printk("\n" KERN_ERR PFX "end of fw dump\n");
}

static void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;

		BNX2X_ERR("queue[%d]: tx_pkt_prod(%x) tx_pkt_cons(%x)"
			  " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR("          rx_bd_prod(%x) rx_bd_cons(%x)"
			  " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
			  " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
			  fp->rx_bd_prod, fp->rx_bd_cons,
			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR("          rx_sge_prod(%x) last_max_sge(%x)"
			  " fp_c_idx(%x) *sb_c_idx(%x) fp_u_idx(%x)"
			  " *sb_u_idx(%x) bd data(%x,%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge, fp->fp_c_idx,
			  fp->status_blk->c_status_block.status_block_index,
			  fp->fp_u_idx,
			  fp->status_blk->u_status_block.status_block_index,
			  hw_prods->packets_prod, hw_prods->bds_prod);

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j < end; j++) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
				  sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j < end; j++) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
				  j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j < end; j++) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
				  j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j < end; j++) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
				  j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j < end; j++) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
				  j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
		  " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
		  " spq_prod_idx(%u)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}

static void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) MSI-X %d\n",
		   val, port, addr, msix);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) MSI-X %d\n",
	   val, port, addr, msix);

	REG_WR(bp, addr, val);

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		if (IS_E1HMF(bp)) {
			val = (0xfe0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig attention */
				val |= 0x0100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i].vector);

		/* one more for the Slow Path IRQ */
		synchronize_irq(bp->msix_table[i].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}

/* fast path */

/*
 * General service functions
 */

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
				u8 storm, u16 index, u8 op, u8 update)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
	   (*(u32 *)&igu_ack), hc_addr);
	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
}

static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	struct host_status_block *fpsb = fp->status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
		fp->fp_c_idx = fpsb->c_status_block.status_block_index;
		rc |= 1;
	}
	if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
		fp->fp_u_idx = fpsb->u_status_block.status_block_index;
		rc |= 2;
	}
	return rc;
}

static u16 bnx2x_ack_int(struct bnx2x *bp)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_SIMD_MASK);
	u32 result = REG_RD(bp, hc_addr);

	DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
	   result, hc_addr);

	return result;
}

/*
 * fast path service functions
 */

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_bd *tx_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
	   idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_bd = &fp->tx_desc_ring[bd_idx];
	pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);

	nbd = le16_to_cpu(tx_bd->nbd) - 1;
	new_cons = nbd + tx_buf->first_bd;
#ifdef BNX2X_STOP_ON_ERROR
	if (nbd > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif

	/* Skip a parse bd and the TSO split header bd
	   since they have no mapping */
	if (nbd)
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
					   ETH_TX_BD_FLAGS_TCP_CSUM |
					   ETH_TX_BD_FLAGS_SW_LSO)) {
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		tx_bd = &fp->tx_desc_ring[bd_idx];
		/* is this a TSO split header bd? */
		if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
			if (--nbd)
				bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		}
	}

	/* now free frags */
	while (nbd > 0) {

		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_bd = &fp->tx_desc_ring[bd_idx];
		pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			       BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	dev_kfree_skb(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}

static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
	s16 used;
	u16 prod;
	u16 cons;

	barrier(); /* Tell compiler that prod and cons can change */
	prod = fp->tx_bd_prod;
	cons = fp->tx_bd_cons;

	/* NUM_TX_RINGS = number of "next-page" entries
	   It will be used as a threshold */
	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
	WARN_ON(used < 0);
	WARN_ON(used > fp->bp->tx_ring_size);
	WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

	return (s16)(fp->bp->tx_ring_size) - used;
}
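
/* Worked example (illustrative numbers): with prod = 100 and cons = 90,
 * SUB_S16() yields 10 in-flight BDs; adding NUM_TX_RINGS reserves one slot
 * per "next page" BD, so the function reports
 * tx_ring_size - (10 + NUM_TX_RINGS) free descriptors. Doing the
 * subtraction in s16 keeps the result correct even after the 16-bit
 * ring indices wrap around.
 */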

static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
{
	struct bnx2x *bp = fp->bp;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
	int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

		DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
		   hw_cons, sw_cons, pkt_cons);

/*		if (NEXT_TX_IDX(sw_cons) != hw_cons) {
			rmb();
			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
		}
*/
		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
		done++;

		if (done == work)
			break;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* Need to make the tx_cons update visible to start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* TBD need a thresh? */
	if (unlikely(netif_queue_stopped(bp->dev))) {

		netif_tx_lock(bp->dev);

		if (netif_queue_stopped(bp->dev) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_wake_queue(bp->dev);

		netif_tx_unlock(bp->dev);
	}
}

static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d cid %d got ramrod #%d state is %x type is %d\n",
	   FP_IDX(fp), cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	bp->spq_left++;

	if (FP_IDX(fp)) {
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply (%d) "
				  "fp->state is %x\n", command, fp->state);
			break;
		}
		mb(); /* force bnx2x_wait_ramrod() to see the change */
		return;
	}

	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		bp->set_mac_pending = 0;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct page *page = sw_buf->page;
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	/* Skip "next page" elements */
	if (!page)
		return;

	pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
		       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
	__free_pages(page, PAGES_PER_SGE_SHIFT);

	sw_buf->page = NULL;
	sge->addr_hi = 0;
	sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++)
		bnx2x_free_rx_sge(bp, fp, i);
}

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL))
		return -ENOMEM;

	mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
			       PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		return -ENOMEM;
	}

	sw_buf->page = page;
	pci_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sk_buff *skb;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (unlikely(skb == NULL))
		return -ENOMEM;

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
			       struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
				       pci_unmap_addr(cons_rx_buf, mapping),
				       bp->rx_offset + RX_COPY_THRESH,
				       PCI_DMA_FROMDEVICE);

	prod_rx_buf->skb = cons_rx_buf->skb;
	pci_unmap_addr_set(prod_rx_buf, mapping,
			   pci_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
}
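
/* Design note: this recycle path is what the error/drop paths in
 * bnx2x_rx_int() rely on - the RX ring never develops a hole, because
 * either a fresh skb is allocated for the producer slot or the old one is
 * moved there wholesale (skb pointer, DMA mapping and BD contents alike).
 */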

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
	int i, j;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		int idx = RX_SGE_CNT * i - 1;

		for (j = 0; j < 2; j++) {
			SGE_MASK_CLEAR_BIT(fp, idx);
			idx--;
		}
	}
}

static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
		      SGE_PAGE_SHIFT;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
		delta += RX_SGE_MASK_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
	memset(fp->sge_mask, 0xff,
	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

	/* Clear the two last indices in the page to 1:
	   these are the indices that correspond to the "next" element,
	   hence will never be indicated and should be removed from
	   the calculations. */
	bnx2x_clear_sge_mask_next_elems(fp);
}
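
/* Example of the bookkeeping above (illustrative): sge_mask holds one bit
 * per SGE entry, grouped into u64 elements, with a set bit meaning "not yet
 * consumed by the FW". Once a whole u64 element reads as 0, all 64 entries
 * in it have been used, so the producer can jump past the element in one
 * step (delta += RX_SGE_MASK_ELEM_SZ) and the element is re-armed to all
 * ones. Comparing a whole element against 0 is what makes the all-1-s
 * encoding cheaper than tracking "used" bits directly.
 */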

static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;

	/* move empty skb from pool to prod and map it */
	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
	mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
				 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
	pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

	/* move partial skb from cons to pool (don't unmap yet) */
	fp->tpa_pool[queue] = *cons_rx_buf;

	/* mark bin state as start - print error if current state != stop */
	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	fp->tpa_state[queue] = BNX2X_TPA_START;

	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}

static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_cqe *fp_cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;

	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
	pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
					       max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages >
	    min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
			  fp_cqe->pkt_len, len_on_bd);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			bp->eth_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we are going to pass it to the stack */
		pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
			       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}

static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
	struct sk_buff *skb = rx_buf->skb;
	/* alloc new skb */
	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
			 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	if (likely(new_skb)) {
		/* fix ip xsum and give it to the stack */
		/* (no need to map the new skb) */

		prefetch(skb);
		prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > bp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail... "
				  "pad %d len %d rx_buf_size %d\n",
				  pad, len, bp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad);
		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		{
			struct iphdr *iph;

			iph = (struct iphdr *)skb->data;
			iph->check = 0;
			iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
		}

		if (!bnx2x_fill_frag_skb(bp, fp, skb,
					 &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
			if ((bp->vlgrp != NULL) &&
			    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
			     PARSING_FLAGS_VLAN))
				vlan_hwaccel_receive_skb(skb, bp->vlgrp,
						le16_to_cpu(cqe->fast_path_cqe.
							    vlan_tag));
			else
#endif
				netif_receive_skb(skb);
		} else {
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");
			dev_kfree_skb(skb);
		}

		/* put new skb in bin */
		fp->tpa_pool[queue].skb = new_skb;

	} else {
		/* else drop the packet and keep the buffer in the bin */
		DP(NETIF_MSG_RX_STATUS,
		   "Failed to allocate new skb - dropping packet!\n");
		bp->eth_stats.rx_skb_alloc_failed++;
	}

	fp->tpa_state[queue] = BNX2X_TPA_STOP;
}

static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					struct bnx2x_fastpath *fp,
					u16 bd_prod, u16 rx_comp_prod,
					u16 rx_sge_prod)
{
	struct tstorm_eth_rx_producers rx_prods = {0};
	int i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	/*
	 * Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since the FW
	 * assumes BDs must have buffers.
	 */
	wmb();

	for (i = 0; i < sizeof(struct tstorm_eth_rx_producers)/4; i++)
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_RX_PRODS_OFFSET(BP_PORT(bp), FP_CL_ID(fp)) + i*4,
		       ((u32 *)&rx_prods)[i]);

	mmiowb(); /* keep prod updates ordered */

	DP(NETIF_MSG_RX_STATUS,
	   "Wrote: bd_prod %u cqe_prod %u sge_prod %u\n",
	   bd_prod, rx_comp_prod, rx_sge_prod);
}
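
/* Ordering note: wmb() publishes the ring contents before the producers
 * that advertise them - on a weakly ordered arch the FW could otherwise
 * fetch a BD whose buffer address it has not yet seen. The three producers
 * are written as a single struct so bd/cqe/sge always advance as one
 * consistent set from the chip's point of view.
 */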

static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
	   FP_IDX(fp), hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		u8 cqe_fp_flags;
		u16 len, pad;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

		DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
		   " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
		   cqe->fast_path_cqe.rss_hash_result,
		   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
		   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE(cqe_fp_flags))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;

		/* this is an rx packet */
		} else {
			rx_buf = &fp->rx_buf_ring[bd_cons];
			skb = rx_buf->skb;
			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
			pad = cqe->fast_path_cqe.placement_offset;

			/* If CQE is marked both TPA_START and TPA_END
			   it is a non-TPA CQE */
			if ((!fp->disable_tpa) &&
			    (TPA_TYPE(cqe_fp_flags) !=
			     (TPA_TYPE_START | TPA_TYPE_END))) {
				u16 queue = cqe->fast_path_cqe.queue_index;

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_start on queue %d\n",
					   queue);

					bnx2x_tpa_start(fp, queue, skb,
							bd_cons, bd_prod);
					goto next_rx;
				}

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_stop on queue %d\n",
					   queue);

					if (!BNX2X_RX_SUM_FIX(cqe))
						BNX2X_ERR("STOP on non-TCP "
							  "data\n");

					/* This is a size of the linear data
					   on this skb */
					len = le16_to_cpu(cqe->fast_path_cqe.
								len_on_bd);
					bnx2x_tpa_stop(bp, fp, queue, pad,
						       len, cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
					if (bp->panic)
						return -EINVAL;
#endif

					bnx2x_update_sge_prod(fp,
							&cqe->fast_path_cqe);
					goto next_cqe;
				}
			}

			pci_dma_sync_single_for_device(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						       pad + RX_COPY_THRESH,
						       PCI_DMA_FROMDEVICE);
			prefetch(skb);
			prefetch(((char *)(skb)) + 128);

			/* is this an error packet? */
			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR flags %x rx packet %u\n",
				   cqe_fp_flags, sw_comp_cons);
				bp->eth_stats.rx_err_discard_pkt++;
				goto reuse_rx;
			}

			/* Since we don't have a jumbo ring
			 * copy small packets if mtu > 1500
			 */
			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
			    (len <= RX_COPY_THRESH)) {
				struct sk_buff *new_skb;

				new_skb = netdev_alloc_skb(bp->dev,
							   len + pad);
				if (new_skb == NULL) {
					DP(NETIF_MSG_RX_ERR,
					   "ERROR packet dropped "
					   "because of alloc failure\n");
					bp->eth_stats.rx_skb_alloc_failed++;
					goto reuse_rx;
				}

				/* aligned copy */
				skb_copy_from_linear_data_offset(skb, pad,
						    new_skb->data + pad, len);
				skb_reserve(new_skb, pad);
				skb_put(new_skb, len);

				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

				skb = new_skb;

			} else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
				pci_unmap_single(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						 bp->rx_buf_size,
						 PCI_DMA_FROMDEVICE);
				skb_reserve(skb, pad);
				skb_put(skb, len);

			} else {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR packet dropped because "
				   "of alloc failure\n");
				bp->eth_stats.rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
				goto next_rx;
			}

			skb->protocol = eth_type_trans(skb, bp->dev);

			skb->ip_summed = CHECKSUM_NONE;
			if (bp->rx_csum) {
				if (likely(BNX2X_RX_CSUM_OK(cqe)))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					bp->eth_stats.hw_csum_err++;
			}
		}

#ifdef BCM_VLAN
		if ((bp->vlgrp != NULL) &&
		    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
		     PARSING_FLAGS_VLAN))
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
		else
#endif
			netif_receive_skb(skb);

next_rx:
		rx_buf->skb = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}
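
/* Flow recap: one pass of bnx2x_rx_int() walks the completion queue for up
 * to "budget" packets, dispatching each CQE as a slowpath event, a TPA
 * start/stop, or a regular frame, and then publishes all three producers
 * (BD, CQE, SGE) to the FW in a single bnx2x_update_rx_prod() call so the
 * chip always sees a consistent view of the rings.
 */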

static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;
	int index = FP_IDX(fp);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
	   index, FP_SB_ID(fp));
	bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	prefetch(fp->rx_cons_sb);
	prefetch(fp->tx_cons_sb);
	prefetch(&fp->status_blk->c_status_block.status_block_index);
	prefetch(&fp->status_blk->u_status_block.status_block_index);

	netif_rx_schedule(&bnx2x_fp(bp, index, napi));

	return IRQ_HANDLED;
}

static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2x *bp = netdev_priv(dev);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt status %u\n", status);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	mask = 0x2 << bp->fp[0].sb_id;
	if (status & mask) {
		struct bnx2x_fastpath *fp = &bp->fp[0];

		prefetch(fp->rx_cons_sb);
		prefetch(fp->tx_cons_sb);
		prefetch(&fp->status_blk->c_status_block.status_block_index);
		prefetch(&fp->status_blk->u_status_block.status_block_index);

		netif_rx_schedule(&bnx2x_fp(bp, 0, napi));

		status &= ~mask;
	}

	if (unlikely(status & 0x1)) {
		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	if (status)
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
		   status);

	return IRQ_HANDLED;
}

/* end of fast path */

static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);

/* Link */

/*
 * General service functions
 */

static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;
	int cnt;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is not already taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit) {
		DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EEXIST;
	}

	/* Try for 5 seconds every 5ms */
	for (cnt = 0; cnt < 1000; cnt++) {
		/* Try to acquire the lock */
		REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
		lock_status = REG_RD(bp, hw_lock_control_reg);
		if (lock_status & resource_bit)
			return 0;

		msleep(5);
	}
	DP(NETIF_MSG_HW, "Timeout\n");
	return -EAGAIN;
}

static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is currently taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (!(lock_status & resource_bit)) {
		DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EFAULT;
	}

	REG_WR(bp, hw_lock_control_reg, resource_bit);
	return 0;
}

/* HW Lock for shared dual port PHYs */
static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
	u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

	mutex_lock(&bp->port.phy_mutex);

	if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
	    (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
}

static void bnx2x_release_phy_lock(struct bnx2x *bp)
{
	u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

	if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
	    (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
}

int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
		break;

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
		   gpio_num, gpio_shift);
		/* set FLOAT */
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}

static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
{
	u32 spio_mask = (1 << spio_num);
	u32 spio_reg;

	if ((spio_num < MISC_REGISTERS_SPIO_4) ||
	    (spio_num > MISC_REGISTERS_SPIO_7)) {
		BNX2X_ERR("Invalid SPIO %d\n", spio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
	/* read SPIO and mask except the float bits */
	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_SPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
		/* clear FLOAT and set CLR */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
		break;

	case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
		/* clear FLOAT and set SET */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
		break;

	case MISC_REGISTERS_SPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
		/* set FLOAT */
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_SPIO, spio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);

	return 0;
}

static void bnx2x_calc_fc_adv(struct bnx2x *bp)
{
	switch (bp->link_vars.ieee_fc &
		MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
		bp->port.advertising |= (ADVERTISED_Asym_Pause |
					 ADVERTISED_Pause);
		break;
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
		bp->port.advertising |= ADVERTISED_Asym_Pause;
		break;
	default:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;
	}
}

static void bnx2x_link_report(struct bnx2x *bp)
{
	if (bp->link_vars.link_up) {
		if (bp->state == BNX2X_STATE_OPEN)
			netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);

		printk("%d Mbps ", bp->link_vars.line_speed);

		if (bp->link_vars.duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
					printk("& transmit ");
			} else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");

	} else { /* link_down */
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
	}
}

static u8 bnx2x_initial_phy_init(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		u8 rc;

		/* Initialize link parameters structure variables */
		/* It is recommended to turn off RX FC for jumbo frames
		   for better performance */
		if (IS_E1HMF(bp))
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
		else if (bp->dev->mtu > 5000)
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
		else
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;

		bnx2x_acquire_phy_lock(bp);
		rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);

		if (bp->link_vars.link_up)
			bnx2x_link_report(bp);

		return rc;
	}
	BNX2X_ERR("Bootcode is missing - not initializing link\n");
	return -EINVAL;
}

static void bnx2x_link_set(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);
	} else
		BNX2X_ERR("Bootcode is missing - not setting link\n");
}

static void bnx2x__link_reset(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_link_reset(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - not resetting link\n");
}

static u8 bnx2x_link_test(struct bnx2x *bp)
{
	u8 rc;

	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
	bnx2x_release_phy_lock(bp);

	return rc;
}

/* Calculates the sum of vn_min_rates.
   It's needed for further normalizing of the min_rates.

   Returns:
     sum of vn_min_rates
       or
     0 - if all the min_rates are 0.
   In the latter case fairness algorithm should be deactivated.
   If not all min_rates are zero then those that are zeroes will
   be set to 1.
 */
static u32 bnx2x_calc_vn_wsum(struct bnx2x *bp)
{
	int i, port = BP_PORT(bp);
	u32 wsum = 0;
	int all_zero = 1;

	for (i = 0; i < E1HVN_MAX; i++) {
		u32 vn_cfg =
			SHMEM_RD(bp, mf_cfg.func_mf_config[2*i + port].config);
		u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				   FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
		if (!(vn_cfg & FUNC_MF_CFG_FUNC_HIDE)) {
			/* If min rate is zero - set it to 1 */
			if (!vn_min_rate)
				vn_min_rate = DEF_MIN_RATE;
			else
				all_zero = 0;

			wsum += vn_min_rate;
		}
	}

	/* ... only if all min rates are zeros - disable FAIRNESS */
	if (all_zero)
		return 0;

	return wsum;
}
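
/* Worked example (illustrative numbers): with four VNs configured at min
 * rates 0/2000/3000/5000 (the config field is in units of 100 Mbps), the
 * zero entry is bumped to DEF_MIN_RATE and wsum becomes the denominator
 * that bnx2x_init_vn_minmax() uses to turn each VN's min rate into a
 * fair-share credit. Only when every visible VN reports 0 does the
 * function return 0, in which case fairness is switched off entirely.
 */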

static void bnx2x_init_port_minmax(struct bnx2x *bp,
				   int en_fness,
				   u16 port_rate,
				   struct cmng_struct_per_port *m_cmng_port)
{
	u32 r_param = port_rate / 8;
	int port = BP_PORT(bp);
	int i;

	memset(m_cmng_port, 0, sizeof(struct cmng_struct_per_port));

	/* Enable minmax only if we are in e1hmf mode */
	if (IS_E1HMF(bp)) {
		u32 fair_periodic_timeout_usec;
		u32 t_fair;

		/* Enable rate shaping and fairness */
		m_cmng_port->flags.cmng_vn_enable = 1;
		m_cmng_port->flags.fairness_enable = en_fness ? 1 : 0;
		m_cmng_port->flags.rate_shaping_enable = 1;

		if (!en_fness)
			DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
			   " fairness will be disabled\n");

		/* 100 usec in SDM ticks = 25 since each tick is 4 usec */
		m_cmng_port->rs_vars.rs_periodic_timeout =
						RS_PERIODIC_TIMEOUT_USEC / 4;

		/* this is the threshold below which no timer arming will occur
		   1.25 coefficient is for the threshold to be a little bigger
		   than the real time, to compensate for timer inaccuracy */
		m_cmng_port->rs_vars.rs_threshold =
				(RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;

		/* resolution of fairness timer */
		fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
		/* for 10G it is 1000usec. for 1G it is 10000usec. */
		t_fair = T_FAIR_COEF / port_rate;

		/* this is the threshold below which we won't arm
		   the timer anymore */
		m_cmng_port->fair_vars.fair_threshold = QM_ARB_BYTES;

		/* we multiply by 1e3/8 to get bytes/msec.
		   We don't want the credits to pass a credit
		   of the T_FAIR*FAIR_MEM (algorithm resolution) */
		m_cmng_port->fair_vars.upper_bound =
						r_param * t_fair * FAIR_MEM;
		/* since each tick is 4 usec */
		m_cmng_port->fair_vars.fairness_timeout =
						fair_periodic_timeout_usec / 4;

	} else {
		/* Disable rate shaping and fairness */
		m_cmng_port->flags.cmng_vn_enable = 0;
		m_cmng_port->flags.fairness_enable = 0;
		m_cmng_port->flags.rate_shaping_enable = 0;

		DP(NETIF_MSG_IFUP,
		   "Single function mode minmax will be disabled\n");
	}

	/* Store it to internal memory */
	for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
		       ((u32 *)(m_cmng_port))[i]);
}

static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func,
				 u32 wsum, u16 port_rate,
				 struct cmng_struct_per_port *m_cmng_port)
{
	struct rate_shaping_vars_per_vn m_rs_vn;
	struct fairness_vars_per_vn m_fair_vn;
	u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
	u16 vn_min_rate, vn_max_rate;
	int i;

	/* If function is hidden - set min and max to zeroes */
	if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
		vn_min_rate = 0;
		vn_max_rate = 0;

	} else {
		vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
		/* If FAIRNESS is enabled (not all min rates are zeroes) and
		   if current min rate is zero - set it to 1.
		   This is a requirement of the algorithm. */
		if ((vn_min_rate == 0) && wsum)
			vn_min_rate = DEF_MIN_RATE;
		vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
				FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
	}

	DP(NETIF_MSG_IFUP, "func %d: vn_min_rate=%d vn_max_rate=%d "
	   "wsum=%d\n", func, vn_min_rate, vn_max_rate, wsum);

	memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
	memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));

	/* global vn counter - maximal Mbps for this vn */
	m_rs_vn.vn_counter.rate = vn_max_rate;

	/* quota - number of bytes transmitted in this period */
	m_rs_vn.vn_counter.quota =
				(vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;

#ifdef BNX2X_PER_PROT_QOS
	/* per protocol counter */
	for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++) {
		/* maximal Mbps for this protocol */
		m_rs_vn.protocol_counters[protocol].rate =
						protocol_max_rate[protocol];
		/* the quota in each timer period -
		   number of bytes transmitted in this period */
		m_rs_vn.protocol_counters[protocol].quota =
			(u32)(rs_periodic_timeout_usec *
			  ((double)m_rs_vn.
				   protocol_counters[protocol].rate/8));
	}
#endif

	if (wsum) {
		/* credit for each period of the fairness algorithm:
		   number of bytes in T_FAIR (the vn share the port rate).
		   wsum should not be larger than 10000, thus
		   T_FAIR_COEF / (8 * wsum) will always be greater than zero */
		m_fair_vn.vn_credit_delta =
			max((u64)(vn_min_rate * (T_FAIR_COEF / (8 * wsum))),
			    (u64)(m_cmng_port->fair_vars.fair_threshold * 2));
		DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
		   m_fair_vn.vn_credit_delta);
	}

#ifdef BNX2X_PER_PROT_QOS
	{
		u32 protocolWeightSum = 0;

		for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++)
			protocolWeightSum +=
					drvInit.protocol_min_rate[protocol];
		/* per protocol counter -
		   NOT NEEDED IF NO PER-PROTOCOL CONGESTION MANAGEMENT */
		if (protocolWeightSum > 0) {
			for (protocol = 0;
			     protocol < NUM_OF_PROTOCOLS; protocol++)
				/* credit for each period of the
				   fairness algorithm - number of bytes in
				   T_FAIR (the protocol share the vn rate) */
				m_fair_vn.protocol_credit_delta[protocol] =
					(u32)((vn_min_rate / 8) * t_fair *
					protocol_min_rate / protocolWeightSum);
		}
	}
#endif

	/* Store it to internal memory */
	for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_rs_vn))[i]);

	for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_fair_vn))[i]);
}
2229 /* This function is called upon link interrupt */
2230 static void bnx2x_link_attn(struct bnx2x *bp)
2234 /* Make sure that we are synced with the current statistics */
2235 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2237 bnx2x_link_update(&bp->link_params, &bp->link_vars);
2239 if (bp->link_vars.link_up) {
2241 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2242 struct host_port_stats *pstats;
2244 pstats = bnx2x_sp(bp, port_stats);
2245 /* reset old bmac stats */
2246 memset(&(pstats->mac_stx[0]), 0,
2247 sizeof(struct mac_stx));
2249 if ((bp->state == BNX2X_STATE_OPEN) ||
2250 (bp->state == BNX2X_STATE_DISABLED))
2251 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2254 /* indicate link status */
2255 bnx2x_link_report(bp);
2260 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2261 if (vn == BP_E1HVN(bp))
2264 func = ((vn << 1) | BP_PORT(bp));
2266 /* Set the attention towards other drivers on the same port */
2268 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2269 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2273 if (CHIP_IS_E1H(bp) && (bp->link_vars.line_speed > 0)) {
2274 struct cmng_struct_per_port m_cmng_port;
2276 int port = BP_PORT(bp);
2278 /* Init RATE SHAPING and FAIRNESS contexts */
2279 wsum = bnx2x_calc_vn_wsum(bp);
2280 bnx2x_init_port_minmax(bp, (int)wsum,
2281 bp->link_vars.line_speed,
2284 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2285 bnx2x_init_vn_minmax(bp, 2*vn + port,
2286 wsum, bp->link_vars.line_speed,
2291 static void bnx2x__link_status_update(struct bnx2x *bp)
2293 if (bp->state != BNX2X_STATE_OPEN)
2296 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2298 if (bp->link_vars.link_up)
2299 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2301 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2303 /* indicate link status */
2304 bnx2x_link_report(bp);
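/* Called when PMF (port management function) ownership moves to this
 * function: re-enable the NIG attentions for our vn and notify the
 * statistics state machine.
 */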
2307 static void bnx2x_pmf_update(struct bnx2x *bp)
2309 int port = BP_PORT(bp);
2313 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2315 /* enable nig attention */
2316 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2317 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2318 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2320 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2328 * General service functions
2331 /* the slow path queue is odd since completions arrive on the fastpath ring */
2332 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2333 u32 data_hi, u32 data_lo, int common)
2335 int func = BP_FUNC(bp);
2337 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2338 "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
2339 (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2340 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2341 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2343 #ifdef BNX2X_STOP_ON_ERROR
2344 if (unlikely(bp->panic))
2348 spin_lock_bh(&bp->spq_lock);
2350 if (!bp->spq_left) {
2351 BNX2X_ERR("BUG! SPQ ring full!\n");
2352 spin_unlock_bh(&bp->spq_lock);
2357 /* CID needs port number to be encoded in it */
2358 bp->spq_prod_bd->hdr.conn_and_cmd_data =
2359 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2361 bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2363 bp->spq_prod_bd->hdr.type |=
2364 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2366 bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2367 bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2371 if (bp->spq_prod_bd == bp->spq_last_bd) {
2372 bp->spq_prod_bd = bp->spq;
2373 bp->spq_prod_idx = 0;
2374 DP(NETIF_MSG_TIMER, "end of spq\n");
2381 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2384 spin_unlock_bh(&bp->spq_lock);
2388 /* acquire split MCP access lock register */
2389 static int bnx2x_acquire_alr(struct bnx2x *bp)
2396 for (j = 0; j < i*10; j++) {
2398 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2399 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2400 if (val & (1L << 31))
2405 if (!(val & (1L << 31))) {
2406 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2413 /* release split MCP access lock register */
2414 static void bnx2x_release_alr(struct bnx2x *bp)
2418 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
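/* Compare the chip-written default status block indices against the
 * driver's cached copies, refresh the cache and return a non-zero mask
 * when any of the attention/storm indices changed.
 */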
2421 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2423 struct host_def_status_block *def_sb = bp->def_status_blk;
2426 barrier(); /* status block is written to by the chip */
2427 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2428 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2431 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2432 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2435 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2436 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2439 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2440 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2443 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2444 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2451 * slow path service functions
2454 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2456 int port = BP_PORT(bp);
2457 u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2458 COMMAND_REG_ATTN_BITS_SET);
2459 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2460 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2461 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2462 NIG_REG_MASK_INTERRUPT_PORT0;
2465 if (bp->attn_state & asserted)
2466 BNX2X_ERR("IGU ERROR\n");
2468 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2469 aeu_mask = REG_RD(bp, aeu_addr);
2471 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
2472 aeu_mask, asserted);
2473 aeu_mask &= ~(asserted & 0xff);
2474 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2476 REG_WR(bp, aeu_addr, aeu_mask);
2477 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2479 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2480 bp->attn_state |= asserted;
2481 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2483 if (asserted & ATTN_HARD_WIRED_MASK) {
2484 if (asserted & ATTN_NIG_FOR_FUNC) {
2486 bnx2x_acquire_phy_lock(bp);
2488 /* save nig interrupt mask */
2489 bp->nig_mask = REG_RD(bp, nig_int_mask_addr);
2490 REG_WR(bp, nig_int_mask_addr, 0);
2492 bnx2x_link_attn(bp);
2494 /* handle unicore attn? */
2496 if (asserted & ATTN_SW_TIMER_4_FUNC)
2497 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2499 if (asserted & GPIO_2_FUNC)
2500 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2502 if (asserted & GPIO_3_FUNC)
2503 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2505 if (asserted & GPIO_4_FUNC)
2506 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2509 if (asserted & ATTN_GENERAL_ATTN_1) {
2510 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2511 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2513 if (asserted & ATTN_GENERAL_ATTN_2) {
2514 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2515 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2517 if (asserted & ATTN_GENERAL_ATTN_3) {
2518 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2519 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2522 if (asserted & ATTN_GENERAL_ATTN_4) {
2523 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2524 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2526 if (asserted & ATTN_GENERAL_ATTN_5) {
2527 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2528 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2530 if (asserted & ATTN_GENERAL_ATTN_6) {
2531 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2532 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2536 } /* if hardwired */
2538 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2540 REG_WR(bp, hc_addr, asserted);
2542 /* now set back the mask */
2543 if (asserted & ATTN_NIG_FOR_FUNC) {
2544 REG_WR(bp, nig_int_mask_addr, bp->nig_mask);
2545 bnx2x_release_phy_lock(bp);
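/* The bnx2x_attn_int_deasserted[0-3]() helpers below each handle one of
 * the four AEU attention registers. Set 0 covers SPIO5 (used for fan
 * failure on some board types) and fatal HW block attentions.
 */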
2549 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2551 int port = BP_PORT(bp);
2555 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2556 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2558 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2560 val = REG_RD(bp, reg_offset);
2561 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2562 REG_WR(bp, reg_offset, val);
2564 BNX2X_ERR("SPIO5 hw attention\n");
2566 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
2567 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
2568 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
2569 /* Fan failure attention */
2571 /* The PHY reset is controlled by GPIO 1 */
2572 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2573 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2574 /* Low power mode is controlled by GPIO 2 */
2575 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2576 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2577 /* mark the failure */
2578 bp->link_params.ext_phy_config &=
2579 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2580 bp->link_params.ext_phy_config |=
2581 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2583 dev_info.port_hw_config[port].
2584 external_phy_config,
2585 bp->link_params.ext_phy_config);
2586 /* log the failure */
2587 printk(KERN_ERR PFX "Fan Failure on Network"
2588 " Controller %s has caused the driver to"
2589 " shut down the card to prevent permanent"
2590 " damage. Please contact Dell Support for"
2591 " assistance.\n", bp->dev->name);
2599 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2601 val = REG_RD(bp, reg_offset);
2602 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2603 REG_WR(bp, reg_offset, val);
2605 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2606 (attn & HW_INTERRUT_ASSERT_SET_0));
2611 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2615 if (attn & BNX2X_DOORQ_ASSERT) {
2617 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2618 BNX2X_ERR("DB hw attention 0x%x\n", val);
2619 /* DORQ discard attention */
2621 BNX2X_ERR("FATAL error from DORQ\n");
2624 if (attn & HW_INTERRUT_ASSERT_SET_1) {
2626 int port = BP_PORT(bp);
2629 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2630 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2632 val = REG_RD(bp, reg_offset);
2633 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2634 REG_WR(bp, reg_offset, val);
2636 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2637 (attn & HW_INTERRUT_ASSERT_SET_1));
2642 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2646 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2648 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2649 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2650 /* CFC error attention */
2652 BNX2X_ERR("FATAL error from CFC\n");
2655 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2657 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2658 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2659 /* RQ_USDMDP_FIFO_OVERFLOW */
2661 BNX2X_ERR("FATAL error from PXP\n");
2664 if (attn & HW_INTERRUT_ASSERT_SET_2) {
2666 int port = BP_PORT(bp);
2669 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2670 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2672 val = REG_RD(bp, reg_offset);
2673 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2674 REG_WR(bp, reg_offset, val);
2676 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2677 (attn & HW_INTERRUT_ASSERT_SET_2));
2682 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2686 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2688 if (attn & BNX2X_PMF_LINK_ASSERT) {
2689 int func = BP_FUNC(bp);
2691 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2692 bnx2x__link_status_update(bp);
2693 if (SHMEM_RD(bp, func_mb[func].drv_status) &
2695 bnx2x_pmf_update(bp);
2697 } else if (attn & BNX2X_MC_ASSERT_BITS) {
2699 BNX2X_ERR("MC assert!\n");
2700 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2701 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2702 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2703 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2706 } else if (attn & BNX2X_MCP_ASSERT) {
2708 BNX2X_ERR("MCP assert!\n");
2709 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
2713 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2716 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
2717 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2718 if (attn & BNX2X_GRC_TIMEOUT) {
2719 val = CHIP_IS_E1H(bp) ?
2720 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2721 BNX2X_ERR("GRC time-out 0x%08x\n", val);
2723 if (attn & BNX2X_GRC_RSV) {
2724 val = CHIP_IS_E1H(bp) ?
2725 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2726 BNX2X_ERR("GRC reserved 0x%08x\n", val);
2728 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
2732 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2734 struct attn_route attn;
2735 struct attn_route group_mask;
2736 int port = BP_PORT(bp);
2742 /* need to take HW lock because MCP or other port might also
2743 try to handle this event */
2744 bnx2x_acquire_alr(bp);
2746 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2747 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2748 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2749 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
2750 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2751 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
2753 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2754 if (deasserted & (1 << index)) {
2755 group_mask = bp->attn_group[index];
2757 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
2758 index, group_mask.sig[0], group_mask.sig[1],
2759 group_mask.sig[2], group_mask.sig[3]);
2761 bnx2x_attn_int_deasserted3(bp,
2762 attn.sig[3] & group_mask.sig[3]);
2763 bnx2x_attn_int_deasserted1(bp,
2764 attn.sig[1] & group_mask.sig[1]);
2765 bnx2x_attn_int_deasserted2(bp,
2766 attn.sig[2] & group_mask.sig[2]);
2767 bnx2x_attn_int_deasserted0(bp,
2768 attn.sig[0] & group_mask.sig[0]);
2770 if ((attn.sig[0] & group_mask.sig[0] &
2771 HW_PRTY_ASSERT_SET_0) ||
2772 (attn.sig[1] & group_mask.sig[1] &
2773 HW_PRTY_ASSERT_SET_1) ||
2774 (attn.sig[2] & group_mask.sig[2] &
2775 HW_PRTY_ASSERT_SET_2))
2776 BNX2X_ERR("FATAL HW block parity attention\n");
2780 bnx2x_release_alr(bp);
2782 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
2785 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2787 REG_WR(bp, reg_addr, val);
2789 if (~bp->attn_state & deasserted)
2790 BNX2X_ERR("IGU ERROR\n");
2792 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2793 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2795 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2796 aeu_mask = REG_RD(bp, reg_addr);
2798 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
2799 aeu_mask, deasserted);
2800 aeu_mask |= (deasserted & 0xff);
2801 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2803 REG_WR(bp, reg_addr, aeu_mask);
2804 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2806 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2807 bp->attn_state &= ~deasserted;
2808 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
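/* Compute which attention bits were newly asserted or deasserted by
 * comparing the status block's attn_bits/attn_bits_ack against the
 * cached attn_state, then dispatch to the handlers above.
 */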
2811 static void bnx2x_attn_int(struct bnx2x *bp)
2813 /* read local copy of bits */
2814 u32 attn_bits = bp->def_status_blk->atten_status_block.attn_bits;
2815 u32 attn_ack = bp->def_status_blk->atten_status_block.attn_bits_ack;
2816 u32 attn_state = bp->attn_state;
2818 /* look for changed bits */
2819 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
2820 u32 deasserted = ~attn_bits & attn_ack & attn_state;
2823 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
2824 attn_bits, attn_ack, asserted, deasserted);
2826 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
2827 BNX2X_ERR("BAD attention state\n");
2829 /* handle bits that were raised */
2831 bnx2x_attn_int_asserted(bp, asserted);
2834 bnx2x_attn_int_deasserted(bp, deasserted);
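/* Slow path work handler: runs in bnx2x_wq context, picks up default
 * status block updates (attentions and storm events) and acknowledges
 * the new indices back to the IGU.
 */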
2837 static void bnx2x_sp_task(struct work_struct *work)
2839 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
2843 /* Return here if interrupt is disabled */
2844 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2845 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2849 status = bnx2x_update_dsb_idx(bp);
2850 /* if (status == 0) */
2851 /* BNX2X_ERR("spurious slowpath interrupt!\n"); */
2853 DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
2859 /* CStorm events: query_stats, port delete ramrod */
2861 bp->stats_pending = 0;
2863 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, bp->def_att_idx,
2865 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2867 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2869 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2871 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
2876 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2878 struct net_device *dev = dev_instance;
2879 struct bnx2x *bp = netdev_priv(dev);
2881 /* Return here if interrupt is disabled */
2882 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2883 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2887 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, 0, IGU_INT_DISABLE, 0);
2889 #ifdef BNX2X_STOP_ON_ERROR
2890 if (unlikely(bp->panic))
2894 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
2899 /* end of slow path */
2903 /****************************************************************************
2905 ****************************************************************************/
2907 /* sum[hi:lo] += add[hi:lo] */
2908 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
2911 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
2914 /* difference = minuend - subtrahend */
2915 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
2917 if (m_lo < s_lo) { \
2919 d_hi = m_hi - s_hi; \
2921 /* we can 'borrow' 1 */ \
2923 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
2925 /* m_hi <= s_hi */ \
2930 /* m_lo >= s_lo */ \
2931 if (m_hi < s_hi) { \
2935 /* m_hi >= s_hi */ \
2936 d_hi = m_hi - s_hi; \
2937 d_lo = m_lo - s_lo; \
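/* The MAC/storm counters are kept as {hi, lo} pairs of u32 forming one
 * 64-bit value. ADD_64 propagates the carry out of the low word (if
 * s_lo wrapped around it is now smaller than a_lo); DIFF_64 borrows
 * from the high word when the low subtraction would underflow. For
 * example, adding 2 to 0x00000000_ffffffff wraps lo to 1 and bumps
 * hi to 1.
 */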
2942 #define UPDATE_STAT64(s, t) \
2944 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
2945 diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
2946 pstats->mac_stx[0].t##_hi = new->s##_hi; \
2947 pstats->mac_stx[0].t##_lo = new->s##_lo; \
2948 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
2949 pstats->mac_stx[1].t##_lo, diff.lo); \
2952 #define UPDATE_STAT64_NIG(s, t) \
2954 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
2955 diff.lo, new->s##_lo, old->s##_lo); \
2956 ADD_64(estats->t##_hi, diff.hi, \
2957 estats->t##_lo, diff.lo); \
2960 /* sum[hi:lo] += add */
2961 #define ADD_EXTEND_64(s_hi, s_lo, a) \
2964 s_hi += (s_lo < a) ? 1 : 0; \
2967 #define UPDATE_EXTEND_STAT(s) \
2969 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
2970 pstats->mac_stx[1].s##_lo, \
2974 #define UPDATE_EXTEND_TSTAT(s, t) \
2976 diff = le32_to_cpu(tclient->s) - old_tclient->s; \
2977 old_tclient->s = le32_to_cpu(tclient->s); \
2978 ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
2981 #define UPDATE_EXTEND_XSTAT(s, t) \
2983 diff = le32_to_cpu(xclient->s) - old_xclient->s; \
2984 old_xclient->s = le32_to_cpu(xclient->s); \
2985 ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
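/* UPDATE_STAT64 keeps two copies in mac_stx: [0] holds the raw MAC
 * register value, [1] accumulates the deltas into a running 64-bit
 * total. The UPDATE_EXTEND_* variants widen the storms' 32-bit
 * counters by adding the delta since the previous snapshot.
 */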
2989 * General service functions
2992 static inline long bnx2x_hilo(u32 *hiref)
2994 u32 lo = *(hiref + 1);
2995 #if (BITS_PER_LONG == 64)
2998 return HILO_U64(hi, lo);
3005 * Init service functions
3008 static void bnx2x_storm_stats_post(struct bnx2x *bp)
3010 if (!bp->stats_pending) {
3011 struct eth_query_ramrod_data ramrod_data = {0};
3014 ramrod_data.drv_counter = bp->stats_counter++;
3015 ramrod_data.collect_port_1b = bp->port.pmf ? 1 : 0;
3016 ramrod_data.ctr_id_vector = (1 << BP_CL_ID(bp));
3018 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3019 ((u32 *)&ramrod_data)[1],
3020 ((u32 *)&ramrod_data)[0], 0);
3022 /* stats ramrod has its own slot on the spq */
3024 bp->stats_pending = 1;
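/* Reset the statistics machinery: locate the port stats area in shared
 * memory, snapshot the NIG counters as a baseline, and zero the
 * per-function host statistics.
 */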
3029 static void bnx2x_stats_init(struct bnx2x *bp)
3031 int port = BP_PORT(bp);
3033 bp->executer_idx = 0;
3034 bp->stats_counter = 0;
3038 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
3040 bp->port.port_stx = 0;
3041 DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);
3043 memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
3044 bp->port.old_nig_stats.brb_discard =
3045 REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
3046 bp->port.old_nig_stats.brb_truncate =
3047 REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
3048 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
3049 &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
3050 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
3051 &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
3053 /* function stats */
3054 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
3055 memset(&bp->old_tclient, 0, sizeof(struct tstorm_per_client_stats));
3056 memset(&bp->old_xclient, 0, sizeof(struct xstorm_per_client_stats));
3057 memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
3059 bp->stats_state = STATS_STATE_DISABLED;
3060 if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
3061 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
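/* Kick the prepared DMAE transfers: when several commands were queued
 * (executer_idx != 0) they are chained through the DMAE loader,
 * otherwise the single function-stats command is posted directly.
 */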
3064 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3066 struct dmae_command *dmae = &bp->stats_dmae;
3067 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3069 *stats_comp = DMAE_COMP_VAL;
3072 if (bp->executer_idx) {
3073 int loader_idx = PMF_DMAE_C(bp);
3075 memset(dmae, 0, sizeof(struct dmae_command));
3077 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3078 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3079 DMAE_CMD_DST_RESET |
3081 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3083 DMAE_CMD_ENDIANITY_DW_SWAP |
3085 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3087 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3088 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3089 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3090 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3091 sizeof(struct dmae_command) *
3092 (loader_idx + 1)) >> 2;
3093 dmae->dst_addr_hi = 0;
3094 dmae->len = sizeof(struct dmae_command) >> 2;
3097 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3098 dmae->comp_addr_hi = 0;
3102 bnx2x_post_dmae(bp, dmae, loader_idx);
3104 } else if (bp->func_stx) {
3106 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3110 static int bnx2x_stats_comp(struct bnx2x *bp)
3112 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3116 while (*stats_comp != DMAE_COMP_VAL) {
3118 BNX2X_ERR("timeout waiting for stats finished\n");
3128 * Statistics service functions
3131 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3133 struct dmae_command *dmae;
3135 int loader_idx = PMF_DMAE_C(bp);
3136 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3139 if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3140 BNX2X_ERR("BUG!\n");
3144 bp->executer_idx = 0;
3146 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3148 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3150 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3152 DMAE_CMD_ENDIANITY_DW_SWAP |
3154 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3155 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3157 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3158 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3159 dmae->src_addr_lo = bp->port.port_stx >> 2;
3160 dmae->src_addr_hi = 0;
3161 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3162 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3163 dmae->len = DMAE_LEN32_RD_MAX;
3164 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3165 dmae->comp_addr_hi = 0;
3168 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3169 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3170 dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3171 dmae->src_addr_hi = 0;
3172 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3173 DMAE_LEN32_RD_MAX * 4);
3174 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3175 DMAE_LEN32_RD_MAX * 4);
3176 dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3177 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3178 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3179 dmae->comp_val = DMAE_COMP_VAL;
3182 bnx2x_hw_stats_post(bp);
3183 bnx2x_stats_comp(bp);
3186 static void bnx2x_port_stats_init(struct bnx2x *bp)
3188 struct dmae_command *dmae;
3189 int port = BP_PORT(bp);
3190 int vn = BP_E1HVN(bp);
3192 int loader_idx = PMF_DMAE_C(bp);
3194 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3197 if (!bp->link_vars.link_up || !bp->port.pmf) {
3198 BNX2X_ERR("BUG!\n");
3202 bp->executer_idx = 0;
3205 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3206 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3207 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3209 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3211 DMAE_CMD_ENDIANITY_DW_SWAP |
3213 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3214 (vn << DMAE_CMD_E1HVN_SHIFT));
3216 if (bp->port.port_stx) {
3218 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3219 dmae->opcode = opcode;
3220 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3221 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3222 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3223 dmae->dst_addr_hi = 0;
3224 dmae->len = sizeof(struct host_port_stats) >> 2;
3225 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3226 dmae->comp_addr_hi = 0;
3232 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3233 dmae->opcode = opcode;
3234 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3235 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3236 dmae->dst_addr_lo = bp->func_stx >> 2;
3237 dmae->dst_addr_hi = 0;
3238 dmae->len = sizeof(struct host_func_stats) >> 2;
3239 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3240 dmae->comp_addr_hi = 0;
3245 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3246 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3247 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3249 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3251 DMAE_CMD_ENDIANITY_DW_SWAP |
3253 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3254 (vn << DMAE_CMD_E1HVN_SHIFT));
3256 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3258 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3259 NIG_REG_INGRESS_BMAC0_MEM);
3261 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3262 BIGMAC_REGISTER_TX_STAT_GTBYT */
3263 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3264 dmae->opcode = opcode;
3265 dmae->src_addr_lo = (mac_addr +
3266 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3267 dmae->src_addr_hi = 0;
3268 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3269 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3270 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3271 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3272 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3273 dmae->comp_addr_hi = 0;
3276 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3277 BIGMAC_REGISTER_RX_STAT_GRIPJ */
3278 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3279 dmae->opcode = opcode;
3280 dmae->src_addr_lo = (mac_addr +
3281 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3282 dmae->src_addr_hi = 0;
3283 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3284 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3285 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3286 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3287 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3288 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3289 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3290 dmae->comp_addr_hi = 0;
3293 } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3295 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3297 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3298 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3299 dmae->opcode = opcode;
3300 dmae->src_addr_lo = (mac_addr +
3301 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3302 dmae->src_addr_hi = 0;
3303 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3304 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3305 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3306 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3307 dmae->comp_addr_hi = 0;
3310 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3311 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3312 dmae->opcode = opcode;
3313 dmae->src_addr_lo = (mac_addr +
3314 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3315 dmae->src_addr_hi = 0;
3316 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3317 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3318 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3319 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3321 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3322 dmae->comp_addr_hi = 0;
3325 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3326 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3327 dmae->opcode = opcode;
3328 dmae->src_addr_lo = (mac_addr +
3329 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3330 dmae->src_addr_hi = 0;
3331 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3332 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3333 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3334 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3335 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3336 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3337 dmae->comp_addr_hi = 0;
3342 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3343 dmae->opcode = opcode;
3344 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3345 NIG_REG_STAT0_BRB_DISCARD) >> 2;
3346 dmae->src_addr_hi = 0;
3347 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3348 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3349 dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3350 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3351 dmae->comp_addr_hi = 0;
3354 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3355 dmae->opcode = opcode;
3356 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3357 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3358 dmae->src_addr_hi = 0;
3359 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3360 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3361 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3362 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3363 dmae->len = (2*sizeof(u32)) >> 2;
3364 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3365 dmae->comp_addr_hi = 0;
3368 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3369 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3370 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3371 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3373 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3375 DMAE_CMD_ENDIANITY_DW_SWAP |
3377 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3378 (vn << DMAE_CMD_E1HVN_SHIFT));
3379 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3380 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
3381 dmae->src_addr_hi = 0;
3382 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3383 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3384 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3385 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3386 dmae->len = (2*sizeof(u32)) >> 2;
3387 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3388 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3389 dmae->comp_val = DMAE_COMP_VAL;
3394 static void bnx2x_func_stats_init(struct bnx2x *bp)
3396 struct dmae_command *dmae = &bp->stats_dmae;
3397 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3400 if (!bp->func_stx) {
3401 BNX2X_ERR("BUG!\n");
3405 bp->executer_idx = 0;
3406 memset(dmae, 0, sizeof(struct dmae_command));
3408 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3409 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3410 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3412 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3414 DMAE_CMD_ENDIANITY_DW_SWAP |
3416 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3417 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3418 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3419 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3420 dmae->dst_addr_lo = bp->func_stx >> 2;
3421 dmae->dst_addr_hi = 0;
3422 dmae->len = sizeof(struct host_func_stats) >> 2;
3423 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3424 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3425 dmae->comp_val = DMAE_COMP_VAL;
3430 static void bnx2x_stats_start(struct bnx2x *bp)
3433 bnx2x_port_stats_init(bp);
3435 else if (bp->func_stx)
3436 bnx2x_func_stats_init(bp);
3438 bnx2x_hw_stats_post(bp);
3439 bnx2x_storm_stats_post(bp);
3442 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3444 bnx2x_stats_comp(bp);
3445 bnx2x_stats_pmf_update(bp);
3446 bnx2x_stats_start(bp);
3449 static void bnx2x_stats_restart(struct bnx2x *bp)
3451 bnx2x_stats_comp(bp);
3452 bnx2x_stats_start(bp);
3455 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3457 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3458 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3459 struct regpair diff;
3461 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3462 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3463 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3464 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3465 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3466 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3467 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
3468 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3469 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffpauseframesreceived);
3470 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3471 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3472 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3473 UPDATE_STAT64(tx_stat_gt127,
3474 tx_stat_etherstatspkts65octetsto127octets);
3475 UPDATE_STAT64(tx_stat_gt255,
3476 tx_stat_etherstatspkts128octetsto255octets);
3477 UPDATE_STAT64(tx_stat_gt511,
3478 tx_stat_etherstatspkts256octetsto511octets);
3479 UPDATE_STAT64(tx_stat_gt1023,
3480 tx_stat_etherstatspkts512octetsto1023octets);
3481 UPDATE_STAT64(tx_stat_gt1518,
3482 tx_stat_etherstatspkts1024octetsto1522octets);
3483 UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3484 UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3485 UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3486 UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3487 UPDATE_STAT64(tx_stat_gterr,
3488 tx_stat_dot3statsinternalmactransmiterrors);
3489 UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3492 static void bnx2x_emac_stats_update(struct bnx2x *bp)
3494 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3495 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3497 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3498 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3499 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3500 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3501 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3502 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3503 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3504 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3505 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3506 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3507 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3508 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3509 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3510 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3511 UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3512 UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3513 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3514 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3515 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3516 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3517 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3518 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3519 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3520 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3521 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3522 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3523 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3524 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3525 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3526 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3527 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3530 static int bnx2x_hw_stats_update(struct bnx2x *bp)
3532 struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3533 struct nig_stats *old = &(bp->port.old_nig_stats);
3534 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3535 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3536 struct regpair diff;
3538 if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3539 bnx2x_bmac_stats_update(bp);
3541 else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3542 bnx2x_emac_stats_update(bp);
3544 else { /* unreached */
3545 BNX2X_ERR("stats updated by dmae but no MAC active\n");
3549 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3550 new->brb_discard - old->brb_discard);
3551 ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3552 new->brb_truncate - old->brb_truncate);
3554 UPDATE_STAT64_NIG(egress_mac_pkt0,
3555 etherstatspkts1024octetsto1522octets);
3556 UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
3558 memcpy(old, new, sizeof(struct nig_stats));
3560 memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3561 sizeof(struct mac_stx));
3562 estats->brb_drop_hi = pstats->brb_drop_hi;
3563 estats->brb_drop_lo = pstats->brb_drop_lo;
3565 pstats->host_port_stats_start = ++pstats->host_port_stats_end;
3570 static int bnx2x_storm_stats_update(struct bnx2x *bp)
3572 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
3573 int cl_id = BP_CL_ID(bp);
3574 struct tstorm_per_port_stats *tport =
3575 &stats->tstorm_common.port_statistics;
3576 struct tstorm_per_client_stats *tclient =
3577 &stats->tstorm_common.client_statistics[cl_id];
3578 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3579 struct xstorm_per_client_stats *xclient =
3580 &stats->xstorm_common.client_statistics[cl_id];
3581 struct xstorm_per_client_stats *old_xclient = &bp->old_xclient;
3582 struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3583 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3586 /* are storm stats valid? */
3587 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
3588 bp->stats_counter) {
3589 DP(BNX2X_MSG_STATS, "stats not updated by tstorm"
3590 " tstorm counter (%d) != stats_counter (%d)\n",
3591 tclient->stats_counter, bp->stats_counter);
3594 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
3595 bp->stats_counter) {
3596 DP(BNX2X_MSG_STATS, "stats not updated by xstorm"
3597 " xstorm counter (%d) != stats_counter (%d)\n",
3598 xclient->stats_counter, bp->stats_counter);
3602 fstats->total_bytes_received_hi =
3603 fstats->valid_bytes_received_hi =
3604 le32_to_cpu(tclient->total_rcv_bytes.hi);
3605 fstats->total_bytes_received_lo =
3606 fstats->valid_bytes_received_lo =
3607 le32_to_cpu(tclient->total_rcv_bytes.lo);
3609 estats->error_bytes_received_hi =
3610 le32_to_cpu(tclient->rcv_error_bytes.hi);
3611 estats->error_bytes_received_lo =
3612 le32_to_cpu(tclient->rcv_error_bytes.lo);
3613 ADD_64(estats->error_bytes_received_hi,
3614 estats->rx_stat_ifhcinbadoctets_hi,
3615 estats->error_bytes_received_lo,
3616 estats->rx_stat_ifhcinbadoctets_lo);
3618 ADD_64(fstats->total_bytes_received_hi,
3619 estats->error_bytes_received_hi,
3620 fstats->total_bytes_received_lo,
3621 estats->error_bytes_received_lo);
3623 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts, total_unicast_packets_received);
3624 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
3625 total_multicast_packets_received);
3626 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
3627 total_broadcast_packets_received);
3629 fstats->total_bytes_transmitted_hi =
3630 le32_to_cpu(xclient->total_sent_bytes.hi);
3631 fstats->total_bytes_transmitted_lo =
3632 le32_to_cpu(xclient->total_sent_bytes.lo);
3634 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
3635 total_unicast_packets_transmitted);
3636 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
3637 total_multicast_packets_transmitted);
3638 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
3639 total_broadcast_packets_transmitted);
3641 memcpy(estats, &(fstats->total_bytes_received_hi),
3642 sizeof(struct host_func_stats) - 2*sizeof(u32));
3644 estats->mac_filter_discard = le32_to_cpu(tport->mac_filter_discard);
3645 estats->xxoverflow_discard = le32_to_cpu(tport->xxoverflow_discard);
3646 estats->brb_truncate_discard =
3647 le32_to_cpu(tport->brb_truncate_discard);
3648 estats->mac_discard = le32_to_cpu(tport->mac_discard);
3650 old_tclient->rcv_unicast_bytes.hi =
3651 le32_to_cpu(tclient->rcv_unicast_bytes.hi);
3652 old_tclient->rcv_unicast_bytes.lo =
3653 le32_to_cpu(tclient->rcv_unicast_bytes.lo);
3654 old_tclient->rcv_broadcast_bytes.hi =
3655 le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
3656 old_tclient->rcv_broadcast_bytes.lo =
3657 le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
3658 old_tclient->rcv_multicast_bytes.hi =
3659 le32_to_cpu(tclient->rcv_multicast_bytes.hi);
3660 old_tclient->rcv_multicast_bytes.lo =
3661 le32_to_cpu(tclient->rcv_multicast_bytes.lo);
3662 old_tclient->total_rcv_pkts = le32_to_cpu(tclient->total_rcv_pkts);
3664 old_tclient->checksum_discard = le32_to_cpu(tclient->checksum_discard);
3665 old_tclient->packets_too_big_discard =
3666 le32_to_cpu(tclient->packets_too_big_discard);
3667 estats->no_buff_discard =
3668 old_tclient->no_buff_discard = le32_to_cpu(tclient->no_buff_discard);
3669 old_tclient->ttl0_discard = le32_to_cpu(tclient->ttl0_discard);
3671 old_xclient->total_sent_pkts = le32_to_cpu(xclient->total_sent_pkts);
3672 old_xclient->unicast_bytes_sent.hi =
3673 le32_to_cpu(xclient->unicast_bytes_sent.hi);
3674 old_xclient->unicast_bytes_sent.lo =
3675 le32_to_cpu(xclient->unicast_bytes_sent.lo);
3676 old_xclient->multicast_bytes_sent.hi =
3677 le32_to_cpu(xclient->multicast_bytes_sent.hi);
3678 old_xclient->multicast_bytes_sent.lo =
3679 le32_to_cpu(xclient->multicast_bytes_sent.lo);
3680 old_xclient->broadcast_bytes_sent.hi =
3681 le32_to_cpu(xclient->broadcast_bytes_sent.hi);
3682 old_xclient->broadcast_bytes_sent.lo =
3683 le32_to_cpu(xclient->broadcast_bytes_sent.lo);
3685 fstats->host_func_stats_start = ++fstats->host_func_stats_end;
3690 static void bnx2x_net_stats_update(struct bnx2x *bp)
3692 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3693 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3694 struct net_device_stats *nstats = &bp->dev->stats;
3696 nstats->rx_packets =
3697 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
3698 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
3699 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
3701 nstats->tx_packets =
3702 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
3703 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
3704 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
3706 nstats->rx_bytes = bnx2x_hilo(&estats->valid_bytes_received_hi);
3708 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
3710 nstats->rx_dropped = old_tclient->checksum_discard +
3711 estats->mac_discard;
3712 nstats->tx_dropped = 0;
3715 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi);
3717 nstats->collisions =
3718 estats->tx_stat_dot3statssinglecollisionframes_lo +
3719 estats->tx_stat_dot3statsmultiplecollisionframes_lo +
3720 estats->tx_stat_dot3statslatecollisions_lo +
3721 estats->tx_stat_dot3statsexcessivecollisions_lo;
3723 estats->jabber_packets_received =
3724 old_tclient->packets_too_big_discard +
3725 estats->rx_stat_dot3statsframestoolong_lo;
3727 nstats->rx_length_errors =
3728 estats->rx_stat_etherstatsundersizepkts_lo +
3729 estats->jabber_packets_received;
3730 nstats->rx_over_errors = estats->brb_drop_lo + estats->brb_truncate_lo;
3731 nstats->rx_crc_errors = estats->rx_stat_dot3statsfcserrors_lo;
3732 nstats->rx_frame_errors = estats->rx_stat_dot3statsalignmenterrors_lo;
3733 nstats->rx_fifo_errors = old_tclient->no_buff_discard;
3734 nstats->rx_missed_errors = estats->xxoverflow_discard;
3736 nstats->rx_errors = nstats->rx_length_errors +
3737 nstats->rx_over_errors +
3738 nstats->rx_crc_errors +
3739 nstats->rx_frame_errors +
3740 nstats->rx_fifo_errors +
3741 nstats->rx_missed_errors;
3743 nstats->tx_aborted_errors =
3744 estats->tx_stat_dot3statslatecollisions_lo +
3745 estats->tx_stat_dot3statsexcessivecollisions_lo;
3746 nstats->tx_carrier_errors = estats->rx_stat_falsecarriererrors_lo;
3747 nstats->tx_fifo_errors = 0;
3748 nstats->tx_heartbeat_errors = 0;
3749 nstats->tx_window_errors = 0;
3751 nstats->tx_errors = nstats->tx_aborted_errors +
3752 nstats->tx_carrier_errors;
3755 static void bnx2x_stats_update(struct bnx2x *bp)
3757 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3760 if (*stats_comp != DMAE_COMP_VAL)
3764 update = (bnx2x_hw_stats_update(bp) == 0);
3766 update |= (bnx2x_storm_stats_update(bp) == 0);
3769 bnx2x_net_stats_update(bp);
3772 if (bp->stats_pending) {
3773 bp->stats_pending++;
3774 if (bp->stats_pending == 3) {
3775 BNX2X_ERR("stats not updated for 3 times\n");
3782 if (bp->msglevel & NETIF_MSG_TIMER) {
3783 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3784 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3785 struct net_device_stats *nstats = &bp->dev->stats;
3788 printk(KERN_DEBUG "%s:\n", bp->dev->name);
3789 printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
3791 bnx2x_tx_avail(bp->fp),
3792 le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
3793 printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
3795 (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
3796 bp->fp->rx_comp_cons),
3797 le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
3798 printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u\n",
3799 netif_queue_stopped(bp->dev) ? "Xoff" : "Xon",
3800 estats->driver_xoff, estats->brb_drop_lo);
3801 printk(KERN_DEBUG "tstats: checksum_discard %u "
3802 "packets_too_big_discard %u no_buff_discard %u "
3803 "mac_discard %u mac_filter_discard %u "
3804 "xxovrflow_discard %u brb_truncate_discard %u "
3805 "ttl0_discard %u\n",
3806 old_tclient->checksum_discard,
3807 old_tclient->packets_too_big_discard,
3808 old_tclient->no_buff_discard, estats->mac_discard,
3809 estats->mac_filter_discard, estats->xxoverflow_discard,
3810 estats->brb_truncate_discard,
3811 old_tclient->ttl0_discard);
3813 for_each_queue(bp, i) {
3814 printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
3815 bnx2x_fp(bp, i, tx_pkt),
3816 bnx2x_fp(bp, i, rx_pkt),
3817 bnx2x_fp(bp, i, rx_calls));
3821 bnx2x_hw_stats_post(bp);
3822 bnx2x_storm_stats_post(bp);
3825 static void bnx2x_port_stats_stop(struct bnx2x *bp)
3827 struct dmae_command *dmae;
3829 int loader_idx = PMF_DMAE_C(bp);
3830 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3832 bp->executer_idx = 0;
3834 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3836 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3838 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3840 DMAE_CMD_ENDIANITY_DW_SWAP |
3842 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3843 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3845 if (bp->port.port_stx) {
3847 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3849 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3851 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3852 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3853 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3854 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3855 dmae->dst_addr_hi = 0;
3856 dmae->len = sizeof(struct host_port_stats) >> 2;
3858 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3859 dmae->comp_addr_hi = 0;
3862 dmae->comp_addr_lo =
3863 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3864 dmae->comp_addr_hi =
3865 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3866 dmae->comp_val = DMAE_COMP_VAL;
3874 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3875 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3876 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3877 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3878 dmae->dst_addr_lo = bp->func_stx >> 2;
3879 dmae->dst_addr_hi = 0;
3880 dmae->len = sizeof(struct host_func_stats) >> 2;
3881 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3882 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3883 dmae->comp_val = DMAE_COMP_VAL;
3889 static void bnx2x_stats_stop(struct bnx2x *bp)
3893 bnx2x_stats_comp(bp);
3896 update = (bnx2x_hw_stats_update(bp) == 0);
3898 update |= (bnx2x_storm_stats_update(bp) == 0);
3901 bnx2x_net_stats_update(bp);
3904 bnx2x_port_stats_stop(bp);
3906 bnx2x_hw_stats_post(bp);
3907 bnx2x_stats_comp(bp);
3911 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
3915 static const struct {
3916 void (*action)(struct bnx2x *bp);
3917 enum bnx2x_stats_state next_state;
3918 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
3921 /* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
3922 /* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED},
3923 /* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
3924 /* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
3927 /* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED},
3928 /* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED},
3929 /* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED},
3930 /* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED}
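/* The statistics state machine: indexed by [current state][event],
 * each entry names the action to run and the state to enter next.
 * bnx2x_stats_handle() below performs the transition.
 */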
3934 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
3936 enum bnx2x_stats_state state = bp->stats_state;
3938 bnx2x_stats_stm[state][event].action(bp);
3939 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
3941 if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
3942 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
3943 state, event, bp->stats_state);
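/* Periodic driver timer: optionally services the rings in poll mode,
 * exchanges heartbeat pulse sequence numbers with the MCP firmware
 * and triggers a periodic statistics update.
 */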
3946 static void bnx2x_timer(unsigned long data)
3948 struct bnx2x *bp = (struct bnx2x *) data;
3950 if (!netif_running(bp->dev))
3953 if (atomic_read(&bp->intr_sem) != 0)
3957 struct bnx2x_fastpath *fp = &bp->fp[0];
3960 bnx2x_tx_int(fp, 1000);
3961 rc = bnx2x_rx_int(fp, 1000);
3964 if (!BP_NOMCP(bp)) {
3965 int func = BP_FUNC(bp);
3969 ++bp->fw_drv_pulse_wr_seq;
3970 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
3971 /* TBD - add SYSTEM_TIME */
3972 drv_pulse = bp->fw_drv_pulse_wr_seq;
3973 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
3975 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
3976 MCP_PULSE_SEQ_MASK);
3977 /* The delta between driver pulse and mcp response
3978 * should be 1 (before mcp response) or 0 (after mcp response)
3980 if ((drv_pulse != mcp_pulse) &&
3981 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
3982 /* someone lost a heartbeat... */
3983 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
3984 drv_pulse, mcp_pulse);
3988 if ((bp->state == BNX2X_STATE_OPEN) ||
3989 (bp->state == BNX2X_STATE_DISABLED))
3990 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
3993 mod_timer(&bp->timer, jiffies + bp->current_interval);
3996 /* end of Statistics */
4001 * nic init service functions
4004 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
4006 int port = BP_PORT(bp);
4008 bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4009 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4010 sizeof(struct ustorm_status_block)/4);
4011 bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4012 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4013 sizeof(struct cstorm_status_block)/4);
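/* Program one (non-default) host status block: write its DMA address
 * and owning function into USTORM/CSTORM internal memory and start
 * with host coalescing disabled on every index.
 */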
4016 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4017 dma_addr_t mapping, int sb_id)
4019 int port = BP_PORT(bp);
4020 int func = BP_FUNC(bp);
4025 section = ((u64)mapping) + offsetof(struct host_status_block,
4027 sb->u_status_block.status_block_id = sb_id;
4029 REG_WR(bp, BAR_USTRORM_INTMEM +
4030 USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4031 REG_WR(bp, BAR_USTRORM_INTMEM +
4032 ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4034 REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
4035 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4037 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4038 REG_WR16(bp, BAR_USTRORM_INTMEM +
4039 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4042 section = ((u64)mapping) + offsetof(struct host_status_block,
4044 sb->c_status_block.status_block_id = sb_id;
4046 REG_WR(bp, BAR_CSTRORM_INTMEM +
4047 CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4048 REG_WR(bp, BAR_CSTRORM_INTMEM +
4049 ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4051 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4052 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4054 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4055 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4056 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4058 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4061 static void bnx2x_zero_def_sb(struct bnx2x *bp)
4063 int func = BP_FUNC(bp);
4065 bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4066 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4067 sizeof(struct ustorm_def_status_block)/4);
4068 bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4069 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4070 sizeof(struct cstorm_def_status_block)/4);
4071 bnx2x_init_fill(bp, BAR_XSTRORM_INTMEM +
4072 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4073 sizeof(struct xstorm_def_status_block)/4);
4074 bnx2x_init_fill(bp, BAR_TSTRORM_INTMEM +
4075 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4076 sizeof(struct tstorm_def_status_block)/4);
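/* Program the default status block: the attention section (including
 * the per-group AEU masks read back from hardware) plus one section
 * per storm, each with its DMA address and HC indices disabled.
 */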
static void bnx2x_init_def_sb(struct bnx2x *bp,
			      struct host_def_status_block *def_sb,
			      dma_addr_t mapping, int sb_id)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index, val, reg_offset;
	u64 section;

	/* ATTN */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    atten_status_block);
	def_sb->atten_status_block.status_block_id = sb_id;

	bp->attn_state = 0;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		bp->attn_group[index].sig[0] = REG_RD(bp,
						      reg_offset + 0x10*index);
		bp->attn_group[index].sig[1] = REG_RD(bp,
					       reg_offset + 0x4 + 0x10*index);
		bp->attn_group[index].sig[2] = REG_RD(bp,
					       reg_offset + 0x8 + 0x10*index);
		bp->attn_group[index].sig[3] = REG_RD(bp,
					       reg_offset + 0xc + 0x10*index);
	}

	reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
			     HC_REG_ATTN_MSG0_ADDR_L);

	REG_WR(bp, reg_offset, U64_LO(section));
	REG_WR(bp, reg_offset + 4, U64_HI(section));

	reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);

	val = REG_RD(bp, reg_offset);
	val |= sb_id;
	REG_WR(bp, reg_offset, val);

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    u_def_status_block);
	def_sb->u_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_USTRORM_INTMEM +
	       ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
		USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    c_def_status_block);
	def_sb->c_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
		CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	/* TSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    t_def_status_block);
	def_sb->t_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
		TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_TSTRORM_INTMEM +
			 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	/* XSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    x_def_status_block);
	def_sb->x_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
		XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_XSTRORM_INTMEM +
			 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	bp->stats_pending = 0;
	bp->set_mac_pending = 0;

	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}

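/* Each dynamic attention group above occupies four AEU signal registers
 * spaced 0x10 apart, so group 1 reads reg_offset + 0x10, + 0x14, + 0x18
 * and + 0x1c; the default SB itself carries the attention section plus
 * one index section per storm (U/C/T/X), all pointing into the same
 * host_def_status_block mapping.
 */
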
static void bnx2x_update_coalesce(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int i;

	for_each_queue(bp, i) {
		int sb_id = bp->fp[i].sb_id;

		/* HC_INDEX_U_ETH_RX_CQ_CONS */
		REG_WR8(bp, BAR_USTRORM_INTMEM +
			USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
						    U_SB_ETH_RX_CQ_INDEX),
			bp->rx_ticks/12);
		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
						     U_SB_ETH_RX_CQ_INDEX),
			 bp->rx_ticks ? 0 : 1);
		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
						     U_SB_ETH_RX_BD_INDEX),
			 bp->rx_ticks ? 0 : 1);

		/* HC_INDEX_C_ETH_TX_CQ_CONS */
		REG_WR8(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
						    C_SB_ETH_TX_CQ_INDEX),
			bp->tx_ticks/12);
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
						     C_SB_ETH_TX_CQ_INDEX),
			 bp->tx_ticks ? 0 : 1);
	}
}

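/* A tick value of 0 turns coalescing off for that index (the
 * "? 0 : 1" writes above set the HC disable flag).  The divisor used
 * for the timeout bytes is assumed here to convert rx/tx_ticks from
 * microseconds into the coarser resolution of the HC timer.
 */
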
static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
				       struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++) {
		struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
		struct sk_buff *skb = rx_buf->skb;

		if (skb == NULL) {
			DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
			continue;
		}

		if (fp->tpa_state[i] == BNX2X_TPA_START)
			pci_unmap_single(bp->pdev,
					 pci_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_size,
					 PCI_DMA_FROMDEVICE);

		dev_kfree_skb(skb);
		rx_buf->skb = NULL;
	}
}

static void bnx2x_init_rx_rings(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
					      ETH_MAX_AGGREGATION_QUEUES_E1H;
	u16 ring_prod, cqe_ring_prod;
	int i, j;

	bp->rx_buf_size = bp->dev->mtu;
	bp->rx_buf_size += bp->rx_offset + ETH_OVREHEAD +
		BCM_RX_ETH_PAYLOAD_ALIGN;

	if (bp->flags & TPA_ENABLE_FLAG) {
		DP(NETIF_MSG_IFUP,
		   "rx_buf_size %d effective_mtu %d\n",
		   bp->rx_buf_size, bp->dev->mtu + ETH_OVREHEAD);

		for_each_queue(bp, j) {
			struct bnx2x_fastpath *fp = &bp->fp[j];

			for (i = 0; i < max_agg_queues; i++) {
				fp->tpa_pool[i].skb =
				   netdev_alloc_skb(bp->dev, bp->rx_buf_size);
				if (!fp->tpa_pool[i].skb) {
					BNX2X_ERR("Failed to allocate TPA "
						  "skb pool for queue[%d] - "
						  "disabling TPA on this "
						  "queue!\n", j);
					bnx2x_free_tpa_pool(bp, fp, i);
					fp->disable_tpa = 1;
					break;
				}
				pci_unmap_addr_set((struct sw_rx_bd *)
							&bp->fp->tpa_pool[i],
						   mapping, 0);
				fp->tpa_state[i] = BNX2X_TPA_STOP;
			}
		}
	}

	for_each_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		fp->rx_bd_cons = 0;
		fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
		fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;

		/* "next page" elements initialization */
		/* SGE ring */
		for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
			struct eth_rx_sge *sge;

			sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
			sge->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_sge_mapping +
					BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
			sge->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_sge_mapping +
					BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
		}

		bnx2x_init_sge_ring_bit_mask(fp);

		/* RX BD ring */
		for (i = 1; i <= NUM_RX_RINGS; i++) {
			struct eth_rx_bd *rx_bd;

			rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
			rx_bd->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
			rx_bd->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
		}

		/* CQ ring */
		for (i = 1; i <= NUM_RCQ_RINGS; i++) {
			struct eth_rx_cqe_next_page *nextpg;

			nextpg = (struct eth_rx_cqe_next_page *)
				&fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
			nextpg->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_comp_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
			nextpg->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_comp_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
		}

		/* Allocate SGEs and initialize the ring elements */
		for (i = 0, ring_prod = 0;
		     i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {

			if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
				BNX2X_ERR("was only able to allocate "
					  "%d rx sges\n", i);
				BNX2X_ERR("disabling TPA for queue[%d]\n", j);
				/* Cleanup already allocated elements */
				bnx2x_free_rx_sge_range(bp, fp, ring_prod);
				bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
				fp->disable_tpa = 1;
				ring_prod = 0;
				break;
			}
			ring_prod = NEXT_SGE_IDX(ring_prod);
		}
		fp->rx_sge_prod = ring_prod;

		/* Allocate BDs and initialize BD ring */
		fp->rx_comp_cons = 0;
		cqe_ring_prod = ring_prod = 0;
		for (i = 0; i < bp->rx_ring_size; i++) {
			if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
				BNX2X_ERR("was only able to allocate "
					  "%d rx skbs\n", i);
				bp->eth_stats.rx_skb_alloc_failed++;
				break;
			}
			ring_prod = NEXT_RX_IDX(ring_prod);
			cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
			WARN_ON(ring_prod <= i);
		}

		fp->rx_bd_prod = ring_prod;
		/* must not have more available CQEs than BDs */
		fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
				       cqe_ring_prod);
		fp->rx_pkt = fp->rx_calls = 0;

		/* Warning!
		 * this will generate an interrupt (to the TSTORM)
		 * must only be done after chip is initialized
		 */
		bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
				     fp->rx_sge_prod);
		if (j != 0)
			continue;

		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
		       U64_LO(fp->rx_comp_mapping));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
		       U64_HI(fp->rx_comp_mapping));
	}
}

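/* Ring chaining sketch: the loops above reserve the tail of every page
 * as a "next page" pointer - the RX BD and SGE rings use their last two
 * entries (index CNT*i - 2), while the RCQ uses only its last entry
 * (index CNT*i - 1) since a next-page element there is the size of a
 * full CQE.  The (i % NUM_*_RINGS) arithmetic makes the last page wrap
 * back to the first, forming a circular ring.
 */
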
static void bnx2x_init_tx_ring(struct bnx2x *bp)
{
	int i, j;

	for_each_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		for (i = 1; i <= NUM_TX_RINGS; i++) {
			struct eth_tx_bd *tx_bd =
				&fp->tx_desc_ring[TX_DESC_CNT * i - 1];

			tx_bd->addr_hi =
				cpu_to_le32(U64_HI(fp->tx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
			tx_bd->addr_lo =
				cpu_to_le32(U64_LO(fp->tx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
		}

		fp->tx_pkt_prod = 0;
		fp->tx_pkt_cons = 0;
		fp->tx_bd_prod = 0;
		fp->tx_bd_cons = 0;
		fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
		fp->tx_pkt = 0;
	}
}

static void bnx2x_init_sp_ring(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	spin_lock_init(&bp->spq_lock);

	bp->spq_left = MAX_SPQ_PENDING;
	bp->spq_prod_idx = 0;
	bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
	bp->spq_prod_bd = bp->spq;
	bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;

	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
	       U64_LO(bp->spq_mapping));
	REG_WR(bp,
	       XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
	       U64_HI(bp->spq_mapping));

	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
	       bp->spq_prod_idx);
}

static void bnx2x_init_context(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i) {
		struct eth_context *context = bnx2x_sp(bp, context[i].eth);
		struct bnx2x_fastpath *fp = &bp->fp[i];
		u8 sb_id = FP_SB_ID(fp);

		context->xstorm_st_context.tx_bd_page_base_hi =
			U64_HI(fp->tx_desc_mapping);
		context->xstorm_st_context.tx_bd_page_base_lo =
			U64_LO(fp->tx_desc_mapping);
		context->xstorm_st_context.db_data_addr_hi =
			U64_HI(fp->tx_prods_mapping);
		context->xstorm_st_context.db_data_addr_lo =
			U64_LO(fp->tx_prods_mapping);
		context->xstorm_st_context.statistics_data = (BP_CL_ID(bp) |
				XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);

		context->ustorm_st_context.common.sb_index_numbers =
						BNX2X_RX_SB_INDEX_NUM;
		context->ustorm_st_context.common.clientId = FP_CL_ID(fp);
		context->ustorm_st_context.common.status_block_id = sb_id;
		context->ustorm_st_context.common.flags =
			USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT;
		context->ustorm_st_context.common.mc_alignment_size =
			BCM_RX_ETH_PAYLOAD_ALIGN;
		context->ustorm_st_context.common.bd_buff_size =
			bp->rx_buf_size;
		context->ustorm_st_context.common.bd_page_base_hi =
			U64_HI(fp->rx_desc_mapping);
		context->ustorm_st_context.common.bd_page_base_lo =
			U64_LO(fp->rx_desc_mapping);
		if (!fp->disable_tpa) {
			context->ustorm_st_context.common.flags |=
				(USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
				 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
			context->ustorm_st_context.common.sge_buff_size =
				(u16)(BCM_PAGE_SIZE*PAGES_PER_SGE);
			context->ustorm_st_context.common.sge_page_base_hi =
				U64_HI(fp->rx_sge_mapping);
			context->ustorm_st_context.common.sge_page_base_lo =
				U64_LO(fp->rx_sge_mapping);
		}

		context->cstorm_st_context.sb_index_number =
						C_SB_ETH_TX_CQ_INDEX;
		context->cstorm_st_context.status_block_id = sb_id;

		context->xstorm_ag_context.cdu_reserved =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_XCM_AG,
					       ETH_CONNECTION_TYPE);
		context->ustorm_ag_context.cdu_usage =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_UCM_AG,
					       ETH_CONNECTION_TYPE);
	}
}

static void bnx2x_init_ind_table(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int i;

	if (!is_multi(bp))
		return;

	DP(NETIF_MSG_IFUP, "Initializing indirection table\n");
	for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
		REG_WR8(bp, BAR_TSTRORM_INTMEM +
			TSTORM_INDIRECTION_TABLE_OFFSET(port) + i,
			i % bp->num_queues);

	REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
}

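/* The indirection table spreads the RSS hash result round-robin across
 * the active queues: with bp->num_queues == 4 the table reads
 * 0, 1, 2, 3, 0, 1, ... for all TSTORM_INDIRECTION_TABLE_SIZE entries.
 */
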
static void bnx2x_set_client_config(struct bnx2x *bp)
{
	struct tstorm_eth_client_config tstorm_client = {0};
	int port = BP_PORT(bp);
	int i;

	tstorm_client.mtu = bp->dev->mtu + ETH_OVREHEAD;
	tstorm_client.statistics_counter_id = BP_CL_ID(bp);
	tstorm_client.config_flags =
				TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE;
#ifdef BCM_VLAN
	if (bp->rx_mode && bp->vlgrp) {
		tstorm_client.config_flags |=
				TSTORM_ETH_CLIENT_CONFIG_VLAN_REMOVAL_ENABLE;
		DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
	}
#endif

	if (bp->flags & TPA_ENABLE_FLAG) {
		tstorm_client.max_sges_for_packet =
			SGE_PAGE_ALIGN(tstorm_client.mtu) >> SGE_PAGE_SHIFT;
		tstorm_client.max_sges_for_packet =
			((tstorm_client.max_sges_for_packet +
			  PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
			PAGES_PER_SGE_SHIFT;

		tstorm_client.config_flags |=
				TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
	}

	for_each_queue(bp, i) {
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
		       ((u32 *)&tstorm_client)[0]);
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
		       ((u32 *)&tstorm_client)[1]);
	}

	DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
	   ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
}

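/* Worked example for max_sges_for_packet, assuming 4K SGE pages and
 * PAGES_PER_SGE == 2: an MTU of 9000 gives a tstorm_client.mtu of about
 * 9018 bytes, SGE_PAGE_ALIGN() rounds that up to 3 pages, and the
 * second statement rounds 3 up to the next multiple of PAGES_PER_SGE
 * (4) and shifts it down to 2 SGE entries per packet.
 */
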
static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
{
	struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
	int mode = bp->rx_mode;
	int mask = (1 << BP_L_ID(bp));
	int func = BP_FUNC(bp);
	int i;

	DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);

	switch (mode) {
	case BNX2X_RX_MODE_NONE: /* no Rx */
		tstorm_mac_filter.ucast_drop_all = mask;
		tstorm_mac_filter.mcast_drop_all = mask;
		tstorm_mac_filter.bcast_drop_all = mask;
		break;
	case BNX2X_RX_MODE_NORMAL:
		tstorm_mac_filter.bcast_accept_all = mask;
		break;
	case BNX2X_RX_MODE_ALLMULTI:
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		break;
	case BNX2X_RX_MODE_PROMISC:
		tstorm_mac_filter.ucast_accept_all = mask;
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		break;
	default:
		BNX2X_ERR("BAD rx mode (%d)\n", mode);
		break;
	}

	for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
		       ((u32 *)&tstorm_mac_filter)[i]);

/*		DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
		   ((u32 *)&tstorm_mac_filter)[i]); */
	}

	if (mode != BNX2X_RX_MODE_NONE)
		bnx2x_set_client_config(bp);
}

static void bnx2x_init_internal_common(struct bnx2x *bp)
{
	int i;

	if (bp->flags & TPA_ENABLE_FLAG) {
		struct tstorm_eth_tpa_exist tpa = {0};

		tpa.tpa_exist = 1;

		REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
		       ((u32 *)&tpa)[0]);
		REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
		       ((u32 *)&tpa)[1]);
	}

	/* Zero this manually, as its initialization is
	   currently missing in the initTool */
	for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_AGG_DATA_OFFSET + i * 4, 0);
}

static void bnx2x_init_internal_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
}

static void bnx2x_init_internal_func(struct bnx2x *bp)
{
	struct tstorm_eth_function_common_config tstorm_config = {0};
	struct stats_indication_flags stats_flags = {0};
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int i;
	u16 max_agg_size;

	if (is_multi(bp)) {
		tstorm_config.config_flags = MULTI_FLAGS;
		tstorm_config.rss_result_mask = MULTI_MASK;
	}

	tstorm_config.leading_client_id = BP_L_ID(bp);

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
	       (*(u32 *)&tstorm_config));

	bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
	bnx2x_set_storm_rx_mode(bp);

	/* reset xstorm per client statistics */
	for (i = 0; i < sizeof(struct xstorm_per_client_stats) / 4; i++) {
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
		       i*4, 0);
	}
	/* reset tstorm per client statistics */
	for (i = 0; i < sizeof(struct tstorm_per_client_stats) / 4; i++) {
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
		       i*4, 0);
	}

	/* Init statistics related context */
	stats_flags.collect_eth = 1;

	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	if (CHIP_IS_E1H(bp)) {
		REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));

		REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
			 bp->e1hov);
	}

	/* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
	max_agg_size =
		min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
			  SGE_PAGE_SIZE * PAGES_PER_SGE),
		    (u32)0xffff);
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)),
		       U64_LO(fp->rx_comp_mapping));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)) + 4,
		       U64_HI(fp->rx_comp_mapping));

		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_MAX_AGG_SIZE_OFFSET(port, FP_CL_ID(fp)),
			 max_agg_size);
	}
}

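/* Aggregation size example: assuming 4K SGE pages and PAGES_PER_SGE == 2,
 * the min() above yields 8 * 4096 * 2 = 65536, which the (u32)0xffff cap
 * then clamps so the result fits the 16-bit USTORM max-agg-size field.
 */
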
static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
{
	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
		bnx2x_init_internal_common(bp);
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		bnx2x_init_internal_port(bp);
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		bnx2x_init_internal_func(bp);
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}
}

static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
{
	int i;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		fp->bp = bp;
		fp->state = BNX2X_FP_STATE_CLOSED;
		fp->index = i;
		fp->cl_id = BP_L_ID(bp) + i;
		fp->sb_id = fp->cl_id;
		DP(NETIF_MSG_IFUP,
		   "bnx2x_init_sb(%p,%p) index %d cl_id %d sb %d\n",
		   bp, fp->status_blk, i, FP_CL_ID(fp), FP_SB_ID(fp));
		bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
			      FP_SB_ID(fp));
		bnx2x_update_fpsb_idx(fp);
	}

	bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
			  DEF_SB_ID);
	bnx2x_update_dsb_idx(bp);
	bnx2x_update_coalesce(bp);
	bnx2x_init_rx_rings(bp);
	bnx2x_init_tx_ring(bp);
	bnx2x_init_sp_ring(bp);
	bnx2x_init_context(bp);
	bnx2x_init_internal(bp, load_code);
	bnx2x_init_ind_table(bp);
	bnx2x_int_enable(bp);
}

/* end of nic init */

/*
 * gzip service functions
 */

static int bnx2x_gunzip_init(struct bnx2x *bp)
{
	bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
					      &bp->gunzip_mapping);
	if (bp->gunzip_buf == NULL)
		goto gunzip_nomem1;

	bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
	if (bp->strm == NULL)
		goto gunzip_nomem2;

	bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
				      GFP_KERNEL);
	if (bp->strm->workspace == NULL)
		goto gunzip_nomem3;

	return 0;

gunzip_nomem3:
	kfree(bp->strm);
	bp->strm = NULL;

gunzip_nomem2:
	pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
			    bp->gunzip_mapping);
	bp->gunzip_buf = NULL;

gunzip_nomem1:
	printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
	       " un-compression\n", bp->dev->name);
	return -ENOMEM;
}

static void bnx2x_gunzip_end(struct bnx2x *bp)
{
	kfree(bp->strm->workspace);

	kfree(bp->strm);
	bp->strm = NULL;

	if (bp->gunzip_buf) {
		pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
				    bp->gunzip_mapping);
		bp->gunzip_buf = NULL;
	}
}

static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
{
	int n, rc;

	/* check gzip header */
	if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
		return -EINVAL;

	n = 10;

#define FNAME			0x8

	if (zbuf[3] & FNAME)
		while ((zbuf[n++] != 0) && (n < len));

	bp->strm->next_in = zbuf + n;
	bp->strm->avail_in = len - n;
	bp->strm->next_out = bp->gunzip_buf;
	bp->strm->avail_out = FW_BUF_SIZE;

	rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
	if (rc != Z_OK)
		return rc;

	rc = zlib_inflate(bp->strm, Z_FINISH);
	if ((rc != Z_OK) && (rc != Z_STREAM_END))
		printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
		       bp->dev->name, bp->strm->msg);

	bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
	if (bp->gunzip_outlen & 0x3)
		printk(KERN_ERR PFX "%s: Firmware decompression error:"
				    " gunzip_outlen (%d) not aligned\n",
		       bp->dev->name, bp->gunzip_outlen);
	bp->gunzip_outlen >>= 2;

	zlib_inflateEnd(bp->strm);

	if (rc == Z_STREAM_END)
		return 0;

	return rc;
}

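/* gzip framing (RFC 1952): the fixed header is 10 bytes - ID1 0x1f,
 * ID2 0x8b, CM 8 (deflate), FLG, 4-byte MTIME, XFL and OS - which is why
 * n starts at 10 above.  FLG bit 3 (FNAME, 0x8) means a NUL-terminated
 * file name follows, which the while loop skips.  inflateInit2() is
 * called with -MAX_WBITS so zlib decodes the raw deflate stream, the
 * gzip header having already been consumed by hand.
 */
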
/* nic load/unload */

/****************************************************************************
* General service functions
****************************************************************************/

/* send a NIG loopback debug packet */
static void bnx2x_lb_pckt(struct bnx2x *bp)
{
	u32 wb_write[3];

	/* Ethernet source and destination addresses */
	wb_write[0] = 0x55555555;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x20;		/* SOP */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);

	/* NON-IP protocol */
	wb_write[0] = 0x09000000;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x10;		/* EOP, eop_bvalid = 0 */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
}

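/* The NIG debug interface takes 64 bits of packet data per write
 * (wb_write[0]/[1]) plus a control word in wb_write[2]: 0x20 marks
 * start-of-packet and 0x10 end-of-packet, per the inline comments, so
 * the two writes above emit one 0x10-byte frame - the size the memory
 * test below waits for in the NIG statistics.
 */
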
/* Some of the internal memories are not directly readable from the
 * driver; to test them we send debug packets.
 */
static int bnx2x_int_mem_test(struct bnx2x *bp)
{
	int factor;
	int count, i;
	u32 val = 0;

	if (CHIP_REV_IS_FPGA(bp))
		factor = 120;
	else if (CHIP_REV_IS_EMUL(bp))
		factor = 200;
	else
		factor = 1;

	DP(NETIF_MSG_HW, "start part1\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send Ethernet packet */
	bnx2x_lb_pckt(bp);

	/* TODO: do we need to reset the NIG statistics? */
	/* Wait until NIG register shows 1 packet of size 0x10 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0x10)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x10) {
		BNX2X_ERR("NIG timeout val = 0x%x\n", val);
		return -1;
	}

	/* Wait until PRS register shows 1 packet */
	count = 1000 * factor;
	while (count) {
		val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
		if (val == 1)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x1) {
		BNX2X_ERR("PRS timeout val = 0x%x\n", val);
		return -2;
	}

	/* Reset and init BRB, PRS */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
	bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);

	DP(NETIF_MSG_HW, "part2\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send 10 Ethernet packets */
	for (i = 0; i < 10; i++)
		bnx2x_lb_pckt(bp);

	/* Wait until NIG register shows 10 + 1
	   packets of size 11*0x10 = 0xb0 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0xb0)
			break;

		msleep(10);
		count--;
	}
	if (val != 0xb0) {
		BNX2X_ERR("NIG timeout val = 0x%x\n", val);
		return -3;
	}

	/* Wait until PRS register shows 2 packets */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 2)
		BNX2X_ERR("PRS timeout val = 0x%x\n", val);

	/* Write 1 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);

	/* Wait until PRS register shows 3 packets */
	msleep(10 * factor);
	/* Wait until NIG register shows 1 packet of size 0x10 */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 3)
		BNX2X_ERR("PRS timeout val = 0x%x\n", val);

	/* clear NIG EOP FIFO */
	for (i = 0; i < 11; i++)
		REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
	val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
	if (val != 1) {
		BNX2X_ERR("clear of NIG failed\n");
		return -4;
	}

	/* Reset and init BRB, PRS, NIG */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
	bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
#ifndef BCM_ISCSI
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif

	/* Enable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
	REG_WR(bp, CFC_REG_DEBUG0, 0x0);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);

	DP(NETIF_MSG_HW, "done\n");

	return 0; /* OK */
}

static void enable_blocks_attention(struct bnx2x *bp)
{
	REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
	REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
	REG_WR(bp, QM_REG_QM_INT_MASK, 0);
	REG_WR(bp, TM_REG_TM_INT_MASK, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
	REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
	REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
	REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
	REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
	REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
	REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
	if (CHIP_REV_IS_FPGA(bp))
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
	else
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
	REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
	REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
	REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
/*	REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
	REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18);		/* bit 3,4 masked */
}

static int bnx2x_init_common(struct bnx2x *bp)
{
	u32 val, i;

	DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));

	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);

	bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
	if (CHIP_IS_E1H(bp))
		REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));

	REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
	msleep(30);
	REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);

	bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
	if (CHIP_IS_E1(bp)) {
		/* enable HW interrupt from PXP on USDM overflow
		   bit 16 on INT_MASK_0 */
		REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	}

	bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
	bnx2x_init_pxp(bp);

#ifdef __BIG_ENDIAN
	REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 1);

/*	REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
	REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
#endif

	REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
#ifdef BCM_ISCSI
	REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
	REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
	REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
#endif

	if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
		REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);

	/* let the HW do its magic ... */
	msleep(100);
	/* finish PXP init */
	val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
	if (val != 1) {
		BNX2X_ERR("PXP2 CFG failed\n");
		return -EBUSY;
	}
	val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
	if (val != 1) {
		BNX2X_ERR("PXP2 RD_INIT failed\n");
		return -EBUSY;
	}

	REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
	REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);

	bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);

	/* clean the DMAE memory */
	bp->dmae_ready = 1;
	bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);

	bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
	bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
	bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
	bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);

	bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);

	bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
	/* soft reset pulse */
	REG_WR(bp, QM_REG_SOFT_RESET, 1);
	REG_WR(bp, QM_REG_SOFT_RESET, 0);

#ifdef BCM_ISCSI
	bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
#endif

	bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
	REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
	if (!CHIP_REV_IS_SLOW(bp)) {
		/* enable hw interrupt from doorbell Q */
		REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	}

	bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
	if (CHIP_REV_IS_SLOW(bp)) {
		/* fix for emulation and FPGA for no pause */
		REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0, 513);
		REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_1, 513);
		REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0, 0);
		REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_1, 0);
	}

	bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
	if (CHIP_IS_E1H(bp))
		REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));

	bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
	bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
	bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
	bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);

	if (CHIP_IS_E1H(bp)) {
		bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp,
				TSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
				0, STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp,
				CSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
				0, STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp,
				XSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
				0, STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp,
				USTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
				0, STORM_INTMEM_SIZE_E1H/2);
	} else { /* E1 */
		bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1);
		bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1);
		bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1);
		bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1);
	}

	bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
	bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
	bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
	bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);

	/* sync semi rtc */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0x80000000);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
	       0x80000000);

	bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
	bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
	bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);

	REG_WR(bp, SRC_REG_SOFT_RST, 1);
	for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
		REG_WR(bp, i, 0xc0cac01a);
		/* TODO: replace with something meaningful */
	}
	if (CHIP_IS_E1H(bp))
		bnx2x_init_block(bp, SRCH_COMMON_START, SRCH_COMMON_END);
	REG_WR(bp, SRC_REG_SOFT_RST, 0);

	if (sizeof(union cdu_context) != 1024)
		/* we currently assume that a context is 1024 bytes */
		printk(KERN_ALERT PFX "please adjust the size of"
		       " cdu_context(%ld)\n", (long)sizeof(union cdu_context));

	bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
	val = (4 << 24) + (0 << 12) + 1024;
	REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
	if (CHIP_IS_E1(bp)) {
		/* !!! fix pxp client credit until excel update */
		REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
		REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
	}

	bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
	REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);

	bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
	bnx2x_init_block(bp, MISC_AEU_COMMON_START, MISC_AEU_COMMON_END);

	/* PXPCS COMMON comes here */
	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2814, 0xffffffff);
	REG_WR(bp, 0x3820, 0xffffffff);

	/* EMAC0 COMMON comes here */
	/* EMAC1 COMMON comes here */
	/* DBU COMMON comes here */
	/* DBG COMMON comes here */

	bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
		REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
	}

	if (CHIP_REV_IS_SLOW(bp))
		msleep(200);

	/* finish CFC init */
	val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC LL_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC AC_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC CAM_INIT failed\n");
		return -EBUSY;
	}
	REG_WR(bp, CFC_REG_DEBUG0, 0);

	/* read NIG statistic
	   to see if this is our first up since powerup */
	bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
	val = *bnx2x_sp(bp, wb_data[0]);

	/* do internal memory self test */
	if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
		BNX2X_ERR("internal mem self test failed\n");
		return -EBUSY;
	}

	switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
	case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
	case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
		/* Fan failure is indicated by SPIO 5 */
		bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
			       MISC_REGISTERS_SPIO_INPUT_HI_Z);

		/* set to active low mode */
		val = REG_RD(bp, MISC_REG_SPIO_INT);
		val |= ((1 << MISC_REGISTERS_SPIO_5) <<
				MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
		REG_WR(bp, MISC_REG_SPIO_INT, val);

		/* enable interrupt to signal the IGU */
		val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
		val |= (1 << MISC_REGISTERS_SPIO_5);
		REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
		break;

	default:
		break;
	}

	/* clear PXP2 attentions */
	REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);

	enable_blocks_attention(bp);

	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_common_init_phy(bp, bp->common.shmem_base);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not initialize link\n");

	return 0;
}

static int bnx2x_init_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	/* Port PXP comes here */
	/* Port PXP2 comes here */
#ifdef BCM_ISCSI
	i++;
	wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
	wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
	REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
	REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));

	i++;
	wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
	wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
	REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
	REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));

	i++;
	wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
	wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
	REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
	REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
#endif
	/* Port CMs come here */

	/* Port QM comes here */
#ifdef BCM_ISCSI
	REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
	REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);

	bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
			     func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
#endif
	/* Port DQ comes here */
	/* Port BRB1 comes here */
	/* Port PRS comes here */
	/* Port TSDM comes here */
	/* Port CSDM comes here */
	/* Port USDM comes here */
	/* Port XSDM comes here */
	bnx2x_init_block(bp, port ? TSEM_PORT1_START : TSEM_PORT0_START,
			     port ? TSEM_PORT1_END : TSEM_PORT0_END);
	bnx2x_init_block(bp, port ? USEM_PORT1_START : USEM_PORT0_START,
			     port ? USEM_PORT1_END : USEM_PORT0_END);
	bnx2x_init_block(bp, port ? CSEM_PORT1_START : CSEM_PORT0_START,
			     port ? CSEM_PORT1_END : CSEM_PORT0_END);
	bnx2x_init_block(bp, port ? XSEM_PORT1_START : XSEM_PORT0_START,
			     port ? XSEM_PORT1_END : XSEM_PORT0_END);
	/* Port UPB comes here */
	/* Port XPB comes here */

	bnx2x_init_block(bp, port ? PBF_PORT1_START : PBF_PORT0_START,
			     port ? PBF_PORT1_END : PBF_PORT0_END);

	/* configure PBF to work without PAUSE mtu 9000 */
	REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);

	/* update threshold */
	REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
	/* update init credit */
	REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);

	/* probe changes */
	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
	msleep(5);
	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);

#ifdef BCM_ISCSI
	/* tell the searcher where the T2 table is */
	REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);

	wb_write[0] = U64_LO(bp->t2_mapping);
	wb_write[1] = U64_HI(bp->t2_mapping);
	REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
	wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
	wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
	REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);

	REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
	/* Port SRCH comes here */
#endif
	/* Port CDU comes here */
	/* Port CFC comes here */

	if (CHIP_IS_E1(bp)) {
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, port ? HC_PORT1_START : HC_PORT0_START,
			     port ? HC_PORT1_END : HC_PORT0_END);

	bnx2x_init_block(bp, port ? MISC_AEU_PORT1_START :
				    MISC_AEU_PORT0_START,
			     port ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
	/* init aeu_mask_attn_func_0/1:
	 *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
	 *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
	 *             bits 4-7 are used for "per vn group attention" */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
	       (IS_E1HMF(bp) ? 0xF7 : 0x7));

	/* Port PXPCS comes here */
	/* Port EMAC0 comes here */
	/* Port EMAC1 comes here */
	/* Port DBU comes here */
	/* Port DBG comes here */
	bnx2x_init_block(bp, port ? NIG_PORT1_START : NIG_PORT0_START,
			     port ? NIG_PORT1_END : NIG_PORT0_END);

	REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);

	if (CHIP_IS_E1H(bp)) {
		u32 wsum;
		struct cmng_struct_per_port m_cmng_port;
		int vn;

		/* 0x2 disable e1hov, 0x1 enable */
		REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
		       (IS_E1HMF(bp) ? 0x1 : 0x2));

		/* Init RATE SHAPING and FAIRNESS contexts.
		   Initialize as if there is 10G link. */
		wsum = bnx2x_calc_vn_wsum(bp);
		bnx2x_init_port_minmax(bp, (int)wsum, 10000, &m_cmng_port);
		if (IS_E1HMF(bp))
			for (vn = VN_0; vn < E1HVN_MAX; vn++)
				bnx2x_init_vn_minmax(bp, 2*vn + port,
					wsum, 10000, &m_cmng_port);
	}

	/* Port MCP comes here */
	/* Port DMAE comes here */

	switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
	case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
	case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
		/* add SPIO 5 to group 0 */
		val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
		val |= AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
		break;

	default:
		break;
	}

	bnx2x__link_reset(bp);

	return 0;
}

#define ILT_PER_FUNC		(768/2)
#define FUNC_ILT_BASE(func)	(func * ILT_PER_FUNC)
/* the phys address is shifted right 12 bits and has a 1=valid bit
   added to the 53rd bit
   then since this is a wide register(TM)
   we split it into two 32 bit writes
 */
#define ONCHIP_ADDR1(x)		((u32)(((u64)x >> 12) & 0xFFFFFFFF))
#define ONCHIP_ADDR2(x)		((u32)((1 << 20) | ((u64)x >> 44)))
#define PXP_ONE_ILT(x)		(((x) << 10) | x)
#define PXP_ILT_RANGE(f, l)	(((l) << 10) | f)

#define CNIC_ILT_LINES		0

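/* ONCHIP_ADDR example: for the 4K-aligned DMA address 0x123456789000,
 * ONCHIP_ADDR1() yields 0x23456789 (bits 12-43 of the address) and
 * ONCHIP_ADDR2() yields 0x100001 - the valid bit at position 20 OR'ed
 * with the remaining high bits - matching the two 32-bit halves of the
 * wide ILT register.
 */
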
static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
{
	int reg;

	if (CHIP_IS_E1H(bp))
		reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
	else /* E1 */
		reg = PXP2_REG_RQ_ONCHIP_AT + index*8;

	bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
}

static int bnx2x_init_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int i;

	DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);

	i = FUNC_ILT_BASE(func);

	bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
	} else /* E1 */
		REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
		       PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));

	if (CHIP_IS_E1H(bp)) {
		for (i = 0; i < 9; i++)
			bnx2x_init_block(bp,
					 cm_start[func][i], cm_end[func][i]);

		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
		REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
	}

	/* HC init per function */
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);

		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, hc_limits[func][0], hc_limits[func][1]);

	if (CHIP_IS_E1H(bp))
		REG_WR(bp, HC_REG_FUNC_NUM_P0 + port*4, func);

	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2114, 0xffffffff);
	REG_WR(bp, 0x2120, 0xffffffff);

	return 0;
}

static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
{
	int i, rc = 0;

	DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
	   BP_FUNC(bp), load_code);

	bp->dmae_ready = 0;
	mutex_init(&bp->dmae_mutex);
	bnx2x_gunzip_init(bp);

	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
		rc = bnx2x_init_common(bp);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		bp->dmae_ready = 1;
		rc = bnx2x_init_port(bp);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		bp->dmae_ready = 1;
		rc = bnx2x_init_func(bp);
		if (rc)
			goto init_hw_err;
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}

	if (!BP_NOMCP(bp)) {
		int func = BP_FUNC(bp);

		bp->fw_drv_pulse_wr_seq =
				(SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
				 DRV_PULSE_SEQ_MASK);
		bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
		DP(BNX2X_MSG_MCP, "drv_pulse 0x%x func_stx 0x%x\n",
		   bp->fw_drv_pulse_wr_seq, bp->func_stx);
	}

	/* this needs to be done before gunzip end */
	bnx2x_zero_def_sb(bp);
	for_each_queue(bp, i)
		bnx2x_zero_sb(bp, BP_L_ID(bp) + i);

init_hw_err:
	bnx2x_gunzip_end(bp);

	return rc;
}

/* send the MCP a request, block until there is a reply */
static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
{
	int func = BP_FUNC(bp);
	u32 seq = ++bp->fw_seq;
	u32 rc = 0;
	u32 cnt = 1;
	u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;

	SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
	DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));

	do {
		/* let the FW do its magic ... */
		msleep(delay);

		rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);

		/* Give the FW up to 2 seconds (200*10ms) */
	} while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));

	DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
	   cnt*delay, rc, seq);

	/* is this a reply to our command? */
	if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
		rc &= FW_MSG_CODE_MASK;

	} else {
		/* FW BUG! */
		BNX2X_ERR("FW failed to respond!\n");
		bnx2x_fw_dump(bp);
		rc = 0;
	}

	return rc;
}

static void bnx2x_free_mem(struct bnx2x *bp)
{

#define BNX2X_PCI_FREE(x, y, size) \
	do { \
		if (x) { \
			pci_free_consistent(bp->pdev, size, x, y); \
			x = NULL; \
			y = 0; \
		} \
	} while (0)

#define BNX2X_FREE(x) \
	do { \
		if (x) { \
			vfree(x); \
			x = NULL; \
		} \
	} while (0)

	int i;

	/* fastpath */
	for_each_queue(bp, i) {

		/* status blocks */
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
			       bnx2x_fp(bp, i, status_blk_mapping),
			       sizeof(struct host_status_block) +
			       sizeof(struct eth_tx_db_data));

		/* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
		BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
			       bnx2x_fp(bp, i, tx_desc_mapping),
			       sizeof(struct eth_tx_bd) * NUM_TX_BD);

		BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
			       bnx2x_fp(bp, i, rx_desc_mapping),
			       sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
			       bnx2x_fp(bp, i, rx_comp_mapping),
			       sizeof(struct eth_fast_path_rx_cqe) *
			       NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
			       bnx2x_fp(bp, i, rx_sge_mapping),
			       BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}
	/* end of fastpath */

	BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
		       sizeof(struct host_def_status_block));

	BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
		       sizeof(struct bnx2x_slowpath));

#ifdef BCM_ISCSI
	BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
	BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
	BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
	BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
#endif
	BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);

#undef BNX2X_PCI_FREE
#undef BNX2X_FREE
}

static int bnx2x_alloc_mem(struct bnx2x *bp)
{

#define BNX2X_PCI_ALLOC(x, y, size) \
	do { \
		x = pci_alloc_consistent(bp->pdev, size, y); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

#define BNX2X_ALLOC(x, size) \
	do { \
		x = vmalloc(size); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

	int i;

	/* fastpath */
	for_each_queue(bp, i) {
		bnx2x_fp(bp, i, bp) = bp;

		/* status blocks */
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
				&bnx2x_fp(bp, i, status_blk_mapping),
				sizeof(struct host_status_block) +
				sizeof(struct eth_tx_db_data));

		bnx2x_fp(bp, i, hw_tx_prods) =
				(void *)(bnx2x_fp(bp, i, status_blk) + 1);

		bnx2x_fp(bp, i, tx_prods_mapping) =
				bnx2x_fp(bp, i, status_blk_mapping) +
				sizeof(struct host_status_block);

		/* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
		BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
			    sizeof(struct sw_tx_bd) * NUM_TX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
				&bnx2x_fp(bp, i, tx_desc_mapping),
				sizeof(struct eth_tx_bd) * NUM_TX_BD);

		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
			    sizeof(struct sw_rx_bd) * NUM_RX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
				&bnx2x_fp(bp, i, rx_desc_mapping),
				sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
				&bnx2x_fp(bp, i, rx_comp_mapping),
				sizeof(struct eth_fast_path_rx_cqe) *
				NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
			    sizeof(struct sw_rx_page) * NUM_RX_SGE);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
				&bnx2x_fp(bp, i, rx_sge_mapping),
				BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}
	/* end of fastpath */

	BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
			sizeof(struct host_def_status_block));

	BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
			sizeof(struct bnx2x_slowpath));

#ifdef BCM_ISCSI
	BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);

	/* Initialize T1 */
	for (i = 0; i < 64*1024; i += 64) {
		*(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
		*(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
	}

	/* allocate searcher T2 table
	   we allocate 1/4 of alloc num for T2
	   (which is not entered into the ILT) */
	BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);

	/* Initialize T2 */
	for (i = 0; i < 16*1024; i += 64)
		*(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;

	/* now fixup the last line in the block to point to the next block */
	*(u64 *)((char *)bp->t2 + 1024*16 - 8) = bp->t2_mapping;

	/* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
	BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);

	/* QM queues (128*MAX_CONN) */
	BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
#endif

	/* Slow path ring */
	BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);

	return 0;

alloc_mem_err:
	bnx2x_free_mem(bp);
	return -ENOMEM;

#undef BNX2X_PCI_ALLOC
#undef BNX2X_ALLOC
}

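/* T2 free-list sketch (BCM_ISCSI only): the table is a chain of 64-byte
 * entries where the last 8 bytes of each entry hold the physical
 * address of the next one; the final fixup above points the last entry
 * back at t2_mapping, so the searcher walks the 16K block as a ring.
 */
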
static void bnx2x_free_tx_skbs(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		u16 bd_cons = fp->tx_bd_cons;
		u16 sw_prod = fp->tx_pkt_prod;
		u16 sw_cons = fp->tx_pkt_cons;

		while (sw_cons != sw_prod) {
			bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
			sw_cons++;
		}
	}
}

static void bnx2x_free_rx_skbs(struct bnx2x *bp)
{
	int i, j;

	for_each_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		for (i = 0; i < NUM_RX_BD; i++) {
			struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
			struct sk_buff *skb = rx_buf->skb;

			if (skb == NULL)
				continue;

			pci_unmap_single(bp->pdev,
					 pci_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_size,
					 PCI_DMA_FROMDEVICE);

			rx_buf->skb = NULL;
			dev_kfree_skb(skb);
		}
		if (!fp->disable_tpa)
			bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
					    ETH_MAX_AGGREGATION_QUEUES_E1 :
					    ETH_MAX_AGGREGATION_QUEUES_E1H);
	}
}

static void bnx2x_free_skbs(struct bnx2x *bp)
{
	bnx2x_free_tx_skbs(bp);
	bnx2x_free_rx_skbs(bp);
}

static void bnx2x_free_msix_irqs(struct bnx2x *bp)
{
	int i, offset = 1;

	free_irq(bp->msix_table[0].vector, bp->dev);
	DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
	   bp->msix_table[0].vector);

	for_each_queue(bp, i) {
		DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
		   "state %x\n", i, bp->msix_table[i + offset].vector,
		   bnx2x_fp(bp, i, state));

		if (bnx2x_fp(bp, i, state) != BNX2X_FP_STATE_CLOSED)
			BNX2X_ERR("IRQ of fp #%d being freed while "
				  "state != closed\n", i);

		free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
	}
}

static void bnx2x_free_irq(struct bnx2x *bp)
{
	if (bp->flags & USING_MSIX_FLAG) {
		bnx2x_free_msix_irqs(bp);
		pci_disable_msix(bp->pdev);
		bp->flags &= ~USING_MSIX_FLAG;

	} else
		free_irq(bp->pdev->irq, bp->dev);
}

static int bnx2x_enable_msix(struct bnx2x *bp)
{
	int i, rc, offset;

	bp->msix_table[0].entry = 0;
	offset = 1;
	DP(NETIF_MSG_IFUP, "msix_table[0].entry = 0 (slowpath)\n");

	for_each_queue(bp, i) {
		int igu_vec = offset + i + BP_L_ID(bp);

		bp->msix_table[i + offset].entry = igu_vec;
		DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
		   "(fastpath #%u)\n", i + offset, igu_vec, i);
	}

	rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
			     bp->num_queues + offset);
	if (rc) {
		DP(NETIF_MSG_IFUP, "MSI-X is not attainable\n");
		return -1;
	}
	bp->flags |= USING_MSIX_FLAG;

	return 0;
}

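/* MSI-X vector layout: table entry 0 is the slowpath (default SB)
 * interrupt, and fastpath queue i sits at table entry i + 1 requesting
 * IGU vector BP_L_ID() + i + 1, as the DP prints above show.
 */
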
static int bnx2x_req_msix_irqs(struct bnx2x *bp)
{
	int i, rc, offset = 1;

	rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
			 bp->dev->name, bp->dev);
	if (rc) {
		BNX2X_ERR("request sp irq failed\n");
		return -EBUSY;
	}

	for_each_queue(bp, i) {
		rc = request_irq(bp->msix_table[i + offset].vector,
				 bnx2x_msix_fp_int, 0,
				 bp->dev->name, &bp->fp[i]);
		if (rc) {
			BNX2X_ERR("request fp #%d irq failed rc -%d\n",
				  i + offset, -rc);
			bnx2x_free_msix_irqs(bp);
			return -EBUSY;
		}

		bnx2x_fp(bp, i, state) = BNX2X_FP_STATE_IRQ;
	}

	return 0;
}

static int bnx2x_req_irq(struct bnx2x *bp)
{
	int rc;

	rc = request_irq(bp->pdev->irq, bnx2x_interrupt, IRQF_SHARED,
			 bp->dev->name, bp->dev);
	if (!rc)
		bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;

	return rc;
}

static void bnx2x_napi_enable(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i)
		napi_enable(&bnx2x_fp(bp, i, napi));
}

static void bnx2x_napi_disable(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i)
		napi_disable(&bnx2x_fp(bp, i, napi));
}

static void bnx2x_netif_start(struct bnx2x *bp)
{
	if (atomic_dec_and_test(&bp->intr_sem)) {
		if (netif_running(bp->dev)) {
			if (bp->state == BNX2X_STATE_OPEN)
				netif_wake_queue(bp->dev);
			bnx2x_napi_enable(bp);
			bnx2x_int_enable(bp);
		}
	}
}

static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
{
	bnx2x_int_disable_sync(bp, disable_hw);
	if (netif_running(bp->dev)) {
		bnx2x_napi_disable(bp);
		netif_tx_disable(bp->dev);
		bp->dev->trans_start = jiffies;	/* prevent tx timeout */
	}
}

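/* intr_sem acts as a disable count: bnx2x_int_disable_sync() raises it
 * on every bnx2x_netif_stop(), and bnx2x_netif_start() only re-enables
 * the queue, NAPI and HW interrupts once atomic_dec_and_test() brings
 * it back to zero, keeping nested stop/start pairs balanced.
 */
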
/*
 * Init service functions
 */

static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
{
	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
	int port = BP_PORT(bp);

	/* CAM allocation
	 * unicasts 0-31:port0 32-63:port1
	 * multicast 64-127:port0 128-191:port1
	 */
	config->hdr.length_6b = 2;
	config->hdr.offset = port ? 31 : 0;
	config->hdr.client_id = BP_CL_ID(bp);
	config->hdr.reserved1 = 0;

	/* primary MAC */
	config->config_table[0].cam_entry.msb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[0]);
	config->config_table[0].cam_entry.middle_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[2]);
	config->config_table[0].cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[4]);
	config->config_table[0].cam_entry.flags = cpu_to_le16(port);
	if (set)
		config->config_table[0].target_table_entry.flags = 0;
	else
		CAM_INVALIDATE(config->config_table[0]);
	config->config_table[0].target_table_entry.client_id = 0;
	config->config_table[0].target_table_entry.vlan_id = 0;

	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
	   (set ? "setting" : "clearing"),
	   config->config_table[0].cam_entry.msb_mac_addr,
	   config->config_table[0].cam_entry.middle_mac_addr,
	   config->config_table[0].cam_entry.lsb_mac_addr);

	/* broadcast */
	config->config_table[1].cam_entry.msb_mac_addr = 0xffff;
	config->config_table[1].cam_entry.middle_mac_addr = 0xffff;
	config->config_table[1].cam_entry.lsb_mac_addr = 0xffff;
	config->config_table[1].cam_entry.flags = cpu_to_le16(port);
	if (set)
		config->config_table[1].target_table_entry.flags =
				TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
	else
		CAM_INVALIDATE(config->config_table[1]);
	config->config_table[1].target_table_entry.client_id = 0;
	config->config_table[1].target_table_entry.vlan_id = 0;

	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
}

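/* CAM byte-order example: on a little-endian host the MAC address
 * 00:11:22:33:44:55 is read as the u16s 0x1100/0x3322/0x5544, and the
 * swab16() calls above store them as 0x0011/0x2233/0x4455 - the
 * big-endian halfword order the CAM entry expects.
 */
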
static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
{
	struct mac_configuration_cmd_e1h *config =
		(struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);

	if (set && (bp->state != BNX2X_STATE_OPEN)) {
		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
		return;
	}

	/* CAM allocation for E1H
	 * unicasts: by func number
	 * multicast: 20+FUNC*20, 20 each
	 */
	config->hdr.length_6b = 1;
	config->hdr.offset = BP_FUNC(bp);
	config->hdr.client_id = BP_CL_ID(bp);
	config->hdr.reserved1 = 0;

	/* primary MAC */
	config->config_table[0].msb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[0]);
	config->config_table[0].middle_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[2]);
	config->config_table[0].lsb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[4]);
	config->config_table[0].client_id = BP_L_ID(bp);
	config->config_table[0].vlan_id = 0;
	config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
	if (set)
		config->config_table[0].flags = BP_PORT(bp);
	else
		config->config_table[0].flags =
				MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;

	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n",
	   (set ? "setting" : "clearing"),
	   config->config_table[0].msb_mac_addr,
	   config->config_table[0].middle_mac_addr,
	   config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));

	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
}

static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
			     int *state_p, int poll)
{
	/* can take a while if any port is running */
	int cnt = 500;

	DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
	   poll ? "polling" : "waiting", state, idx);

	might_sleep();
	while (cnt--) {
		if (poll) {
			bnx2x_rx_int(bp->fp, 10);
			/* if index is different from 0
			 * the reply for some commands will
			 * be on the non default queue
			 */
			if (idx)
				bnx2x_rx_int(&bp->fp[idx], 10);
		}

		mb(); /* state is changed by bnx2x_sp_event() */
		if (*state_p == state)
			return 0;

		msleep(1);
	}

	/* timeout! */
	BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
		  poll ? "polling" : "waiting", state, idx);
#ifdef BNX2X_STOP_ON_ERROR
	bnx2x_panic();
#endif

	return -EBUSY;
}

static int bnx2x_setup_leading(struct bnx2x *bp)
{
	int rc;

	/* reset IGU state */
	bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

	/* SETUP ramrod */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);

	return rc;
}

static int bnx2x_setup_multi(struct bnx2x *bp, int index)
{
	/* reset IGU state */
	bnx2x_ack_sb(bp, bp->fp[index].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

	/* SETUP ramrod */
	bp->fp[index].state = BNX2X_FP_STATE_OPENING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0, index, 0);

	/* Wait for completion */
	return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
				 &(bp->fp[index].state), 0);
}

6285 static int bnx2x_poll(struct napi_struct *napi, int budget);
6286 static void bnx2x_set_rx_mode(struct net_device *dev);
6288 /* must be called with rtnl_lock */
6289 static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6293 #ifdef BNX2X_STOP_ON_ERROR
6294 if (unlikely(bp->panic))
6298 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
6300 /* Send LOAD_REQUEST command to MCP
6301 Returns the type of LOAD command:
6302 if it is the first port to be initialized
6303 common blocks should be initialized, otherwise - not
6305 if (!BP_NOMCP(bp)) {
6306 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
6308 BNX2X_ERR("MCP response failure, aborting\n");
6311 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED)
6312 return -EBUSY; /* other port in diagnostic mode */
6315 int port = BP_PORT(bp);
6317 DP(NETIF_MSG_IFUP, "NO MCP load counts before us %d, %d, %d\n",
6318 load_count[0], load_count[1], load_count[2]);
6320 load_count[1 + port]++;
6321 DP(NETIF_MSG_IFUP, "NO MCP new load counts %d, %d, %d\n",
6322 load_count[0], load_count[1], load_count[2]);
6323 if (load_count[0] == 1)
6324 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
6325 else if (load_count[1 + port] == 1)
6326 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
6328 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
6331 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
6332 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
6336 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
/* if we can't use MSI-X we only need one fp,
 * so try to enable MSI-X with the requested number of fp's
 * and fall back to INT#A with one fp
 */
6346 if ((use_multi > 1) && (use_multi <= BP_MAX_QUEUES(bp)))
6347 /* user requested number */
6348 bp->num_queues = use_multi;
6351 bp->num_queues = min_t(u32, num_online_cpus(),
6356 if (bnx2x_enable_msix(bp)) {
6357 /* failed to enable MSI-X */
6360 BNX2X_ERR("Multi requested but failed"
6361 " to enable MSI-X\n");
6365 "set number of queues to %d\n", bp->num_queues);
6367 if (bnx2x_alloc_mem(bp))
6370 for_each_queue(bp, i)
6371 bnx2x_fp(bp, i, disable_tpa) =
6372 ((bp->flags & TPA_ENABLE_FLAG) == 0);
6374 if (bp->flags & USING_MSIX_FLAG) {
6375 rc = bnx2x_req_msix_irqs(bp);
6377 pci_disable_msix(bp->pdev);
6382 rc = bnx2x_req_irq(bp);
6384 BNX2X_ERR("IRQ request failed, aborting\n");
6389 for_each_queue(bp, i)
6390 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
6394 rc = bnx2x_init_hw(bp, load_code);
6396 BNX2X_ERR("HW init failed, aborting\n");
6397 goto load_int_disable;
6400 /* Setup NIC internals and enable interrupts */
6401 bnx2x_nic_init(bp, load_code);
6403 /* Send LOAD_DONE command to MCP */
6404 if (!BP_NOMCP(bp)) {
6405 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
6407 BNX2X_ERR("MCP response failure, aborting\n");
6409 goto load_rings_free;
6413 bnx2x_stats_init(bp);
6415 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
/* Enable Rx interrupt handling before sending the ramrod
 * as its completion arrives on the Rx FP queue
 */
6419 bnx2x_napi_enable(bp);
6421 /* Enable interrupt handling */
6422 atomic_set(&bp->intr_sem, 0);
6424 rc = bnx2x_setup_leading(bp);
6426 BNX2X_ERR("Setup leading failed!\n");
6427 goto load_netif_stop;
6430 if (CHIP_IS_E1H(bp))
6431 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
6432 BNX2X_ERR("!!! mf_cfg function disabled\n");
6433 bp->state = BNX2X_STATE_DISABLED;
6436 if (bp->state == BNX2X_STATE_OPEN)
6437 for_each_nondefault_queue(bp, i) {
6438 rc = bnx2x_setup_multi(bp, i);
6440 goto load_netif_stop;
6444 bnx2x_set_mac_addr_e1(bp, 1);
6446 bnx2x_set_mac_addr_e1h(bp, 1);
6449 bnx2x_initial_phy_init(bp);
6451 /* Start fast path */
switch (load_mode) {
case LOAD_NORMAL:
	/* Tx queue should only be re-enabled */
	netif_wake_queue(bp->dev);
	bnx2x_set_rx_mode(bp->dev);
	break;
case LOAD_OPEN:
	netif_start_queue(bp->dev);
	bnx2x_set_rx_mode(bp->dev);
	if (bp->flags & USING_MSIX_FLAG)
		printk(KERN_INFO PFX "%s: using MSI-X\n",
		       bp->dev->name);
	break;
case LOAD_DIAG:
	bnx2x_set_rx_mode(bp->dev);
	bp->state = BNX2X_STATE_DIAG;
	break;
default:
	break;
}
6477 bnx2x__link_status_update(bp);
6479 /* start the timer */
6480 mod_timer(&bp->timer, jiffies + bp->current_interval);
6486 bnx2x_napi_disable(bp);
6488 /* Free SKBs, SGEs, TPA pool and driver internals */
6489 bnx2x_free_skbs(bp);
6490 for_each_queue(bp, i)
6491 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
6493 bnx2x_int_disable_sync(bp, 1);
/* TBD: we really need to reset the chip
 * if we want to recover from this
 */
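/* a client is torn down in two steps: HALT the connection, then,
 * once the halt completes, delete its CFC entry
 */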
6505 static int bnx2x_stop_multi(struct bnx2x *bp, int index)
6509 /* halt the connection */
6510 bp->fp[index].state = BNX2X_FP_STATE_HALTING;
6511 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, index, 0);
6513 /* Wait for completion */
6514 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
6515 &(bp->fp[index].state), 1);
6516 if (rc) /* timeout */
6519 /* delete cfc entry */
6520 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
6522 /* Wait for completion */
6523 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
6524 &(bp->fp[index].state), 1);
6528 static int bnx2x_stop_leading(struct bnx2x *bp)
6530 u16 dsb_sp_prod_idx;
6531 /* if the other port is handling traffic,
6532 this can take a lot of time */
6538 /* Send HALT ramrod */
6539 bp->fp[0].state = BNX2X_FP_STATE_HALTING;
6540 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, BP_CL_ID(bp), 0);
6542 /* Wait for completion */
6543 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
6544 &(bp->fp[0].state), 1);
6545 if (rc) /* timeout */
6548 dsb_sp_prod_idx = *bp->dsb_sp_prod;
6550 /* Send PORT_DELETE ramrod */
6551 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
/* Wait for completion to arrive on the default status block;
 * we are going to reset the chip anyway,
 * so there is not much to do if this times out
 */
6557 while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
6559 DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
6560 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
6561 *bp->dsb_sp_prod, dsb_sp_prod_idx);
6562 #ifdef BNX2X_STOP_ON_ERROR
6572 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
6573 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
6578 static void bnx2x_reset_func(struct bnx2x *bp)
6580 int port = BP_PORT(bp);
6581 int func = BP_FUNC(bp);
6585 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6586 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6588 REG_WR(bp, HC_REG_CONFIG_0 + port*4, 0x1000);
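/* Clear this function's ILT entries */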
6591 base = FUNC_ILT_BASE(func);
6592 for (i = base; i < base + ILT_PER_FUNC; i++)
6593 bnx2x_ilt_wr(bp, i, 0);
6596 static void bnx2x_reset_port(struct bnx2x *bp)
6598 int port = BP_PORT(bp);
6601 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6603 /* Do not rcv packets to BRB */
6604 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
6605 /* Do not direct rcv packets that are not for MCP to the BRB */
6606 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
6607 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6610 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
6613 /* Check for BRB port occupancy */
6614 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
6616 DP(NETIF_MSG_IFDOWN,
6617 "BRB1 is not empty %d blocks are occupied\n", val);
6619 /* TODO: Close Doorbell port? */
6622 static void bnx2x_reset_common(struct bnx2x *bp)
6625 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6627 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
6630 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
6632 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
6633 BP_FUNC(bp), reset_code);
6635 switch (reset_code) {
6636 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
6637 bnx2x_reset_port(bp);
6638 bnx2x_reset_func(bp);
6639 bnx2x_reset_common(bp);
6642 case FW_MSG_CODE_DRV_UNLOAD_PORT:
6643 bnx2x_reset_port(bp);
6644 bnx2x_reset_func(bp);
6647 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
6648 bnx2x_reset_func(bp);
6652 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
6657 /* must be called with rtnl_lock */
6658 static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
6660 int port = BP_PORT(bp);
6664 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
6666 bp->rx_mode = BNX2X_RX_MODE_NONE;
6667 bnx2x_set_storm_rx_mode(bp);
6669 bnx2x_netif_stop(bp, 1);
6670 if (!netif_running(bp->dev))
6671 bnx2x_napi_disable(bp);
6672 del_timer_sync(&bp->timer);
6673 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
6674 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
6675 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
6677 /* Wait until tx fast path tasks complete */
6678 for_each_queue(bp, i) {
6679 struct bnx2x_fastpath *fp = &bp->fp[i];
6683 while (BNX2X_HAS_TX_WORK(fp)) {
6685 bnx2x_tx_int(fp, 1000);
6687 BNX2X_ERR("timeout waiting for queue[%d]\n",
6689 #ifdef BNX2X_STOP_ON_ERROR
6701 /* Give HW time to discard old tx messages */
6707 if (CHIP_IS_E1(bp)) {
6708 struct mac_configuration_cmd *config =
6709 bnx2x_sp(bp, mcast_config);
6711 bnx2x_set_mac_addr_e1(bp, 0);
6713 for (i = 0; i < config->hdr.length_6b; i++)
6714 CAM_INVALIDATE(config->config_table[i]);
6716 config->hdr.length_6b = i;
6717 if (CHIP_REV_IS_SLOW(bp))
6718 config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
6720 config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
6721 config->hdr.client_id = BP_CL_ID(bp);
6722 config->hdr.reserved1 = 0;
6724 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6725 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
6726 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
6729 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
6731 bnx2x_set_mac_addr_e1h(bp, 0);
6733 for (i = 0; i < MC_HASH_SIZE; i++)
6734 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
6737 if (unload_mode == UNLOAD_NORMAL)
6738 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6740 else if (bp->flags & NO_WOL_FLAG) {
6741 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
6742 if (CHIP_IS_E1H(bp))
6743 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
6745 } else if (bp->wol) {
6746 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
6747 u8 *mac_addr = bp->dev->dev_addr;
6749 /* The mac address is written to entries 1-4 to
6750 preserve entry 0 which is used by the PMF */
6751 u8 entry = (BP_E1HVN(bp) + 1)*8;
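/* each MAC_MATCH entry is presumably a pair of 32-bit registers
 * (8 bytes), hence the *8 byte offset here and the +4 below
 */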
6753 val = (mac_addr[0] << 8) | mac_addr[1];
6754 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
6756 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
6757 (mac_addr[4] << 8) | mac_addr[5];
6758 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
6760 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
6763 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
/* Close multi and leading connections.
 * Completions for ramrods are collected synchronously
 */
6767 for_each_nondefault_queue(bp, i)
6768 if (bnx2x_stop_multi(bp, i))
6771 rc = bnx2x_stop_leading(bp);
6773 BNX2X_ERR("Stop leading failed!\n");
6774 #ifdef BNX2X_STOP_ON_ERROR
6783 reset_code = bnx2x_fw_command(bp, reset_code);
6785 DP(NETIF_MSG_IFDOWN, "NO MCP load counts %d, %d, %d\n",
6786 load_count[0], load_count[1], load_count[2]);
6788 load_count[1 + port]--;
6789 DP(NETIF_MSG_IFDOWN, "NO MCP new load counts %d, %d, %d\n",
6790 load_count[0], load_count[1], load_count[2]);
6791 if (load_count[0] == 0)
6792 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
6793 else if (load_count[1 + port] == 0)
6794 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
6796 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
6799 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
6800 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
6801 bnx2x__link_reset(bp);
6803 /* Reset the chip */
6804 bnx2x_reset_chip(bp, reset_code);
6806 /* Report UNLOAD_DONE to MCP */
6808 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6811 /* Free SKBs, SGEs, TPA pool and driver internals */
6812 bnx2x_free_skbs(bp);
6813 for_each_queue(bp, i)
6814 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
6817 bp->state = BNX2X_STATE_CLOSED;
6819 netif_carrier_off(bp->dev);
6824 static void bnx2x_reset_task(struct work_struct *work)
6826 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
6828 #ifdef BNX2X_STOP_ON_ERROR
6829 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
6830 " so reset not done to allow debug dump,\n"
6831 KERN_ERR " you will need to reboot when done\n");
6837 if (!netif_running(bp->dev))
6838 goto reset_task_exit;
6840 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
6841 bnx2x_nic_load(bp, LOAD_NORMAL);
6847 /* end of nic load/unload */
/* Init service functions */
6855 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
6859 /* Check if there is any driver already loaded */
6860 val = REG_RD(bp, MISC_REG_UNPREPARED);
6862 /* Check if it is the UNDI driver
6863 * UNDI driver initializes CID offset for normal bell to 0x7
6865 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
if (val == 0x7)
	REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
6869 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
6872 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6874 int func = BP_FUNC(bp);
6878 BNX2X_DEV_INFO("UNDI is active! reset device\n");
6880 /* try unload UNDI on port 0 */
bp->func = 0;
bp->fw_seq =
	(SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
	 DRV_MSG_SEQ_NUMBER_MASK);
6885 reset_code = bnx2x_fw_command(bp, reset_code);
6887 /* if UNDI is loaded on the other port */
6888 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
6890 /* send "DONE" for previous unload */
6891 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6893 /* unload UNDI on port 1 */
bp->func = 1;
bp->fw_seq =
	(SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
	 DRV_MSG_SEQ_NUMBER_MASK);
6898 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6900 bnx2x_fw_command(bp, reset_code);
6903 REG_WR(bp, (BP_PORT(bp) ? HC_REG_CONFIG_1 :
6904 HC_REG_CONFIG_0), 0x1000);
6906 /* close input traffic and wait for it */
6907 /* Do not rcv packets to BRB */
6909 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
6910 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
/* Do not direct rcv packets that are not for MCP to the BRB */
6914 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
6915 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6918 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
6919 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
6922 /* save NIG port swap info */
6923 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6924 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
6927 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6930 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
6932 /* take the NIG out of reset and restore swap values */
6934 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6935 MISC_REGISTERS_RESET_REG_1_RST_NIG);
6936 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
6937 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
6939 /* send unload done to the MCP */
6940 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6942 /* restore our func and fw_seq */
bp->func = func;
bp->fw_seq =
	(SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
	 DRV_MSG_SEQ_NUMBER_MASK);
6951 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
6953 u32 val, val2, val3, val4, id;
6956 /* Get the chip revision id and number. */
6957 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
6958 val = REG_RD(bp, MISC_REG_CHIP_NUM);
6959 id = ((val & 0xffff) << 16);
6960 val = REG_RD(bp, MISC_REG_CHIP_REV);
6961 id |= ((val & 0xf) << 12);
6962 val = REG_RD(bp, MISC_REG_CHIP_METAL);
6963 id |= ((val & 0xff) << 4);
val = REG_RD(bp, MISC_REG_BOND_ID);
id |= (val & 0xf);
6966 bp->common.chip_id = id;
6967 bp->link_params.chip_id = bp->common.chip_id;
6968 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
6970 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
6971 bp->common.flash_size = (NVRAM_1MB_SIZE <<
6972 (val & MCPR_NVM_CFG4_FLASH_SIZE));
6973 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
6974 bp->common.flash_size, bp->common.flash_size);
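/* the low CFG4 bits select the flash size as a power-of-two
 * multiple of NVRAM_1MB_SIZE, e.g. a field value of 3 selects
 * 8x the base size
 */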
6976 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
6977 bp->link_params.shmem_base = bp->common.shmem_base;
6978 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
6980 if (!bp->common.shmem_base ||
6981 (bp->common.shmem_base < 0xA0000) ||
6982 (bp->common.shmem_base >= 0xC0000)) {
6983 BNX2X_DEV_INFO("MCP not active\n");
6984 bp->flags |= NO_MCP_FLAG;
6988 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
6989 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
6990 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
6991 BNX2X_ERR("BAD MCP validity signature\n");
6993 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
6994 bp->common.board = SHMEM_RD(bp, dev_info.shared_hw_config.board);
6996 BNX2X_DEV_INFO("hw_config 0x%08x board 0x%08x\n",
6997 bp->common.hw_config, bp->common.board);
6999 bp->link_params.hw_led_mode = ((bp->common.hw_config &
7000 SHARED_HW_CFG_LED_MODE_MASK) >>
7001 SHARED_HW_CFG_LED_MODE_SHIFT);
7003 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
7004 bp->common.bc_ver = val;
7005 BNX2X_DEV_INFO("bc_ver %X\n", val);
7006 if (val < BNX2X_BC_VER) {
/* for now only warn;
 * later we might need to enforce this
 */
7009 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
7010 " please upgrade BC\n", BNX2X_BC_VER, val);
7013 if (BP_E1HVN(bp) == 0) {
7014 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
7015 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
7017 /* no WOL capability for E1HVN != 0 */
7018 bp->flags |= NO_WOL_FLAG;
7020 BNX2X_DEV_INFO("%sWoL capable\n",
7021 (bp->flags & NO_WOL_FLAG) ? "Not " : "");
7023 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
7024 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
7025 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
7026 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
7028 printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
7029 val, val2, val3, val4);
7032 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
7035 int port = BP_PORT(bp);
7038 switch (switch_cfg) {
7040 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
7043 SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7044 switch (ext_phy_type) {
7045 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
7046 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7049 bp->port.supported |= (SUPPORTED_10baseT_Half |
7050 SUPPORTED_10baseT_Full |
7051 SUPPORTED_100baseT_Half |
7052 SUPPORTED_100baseT_Full |
7053 SUPPORTED_1000baseT_Full |
7054 SUPPORTED_2500baseX_Full |
7059 SUPPORTED_Asym_Pause);
7062 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
7063 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
7066 bp->port.supported |= (SUPPORTED_10baseT_Half |
7067 SUPPORTED_10baseT_Full |
7068 SUPPORTED_100baseT_Half |
7069 SUPPORTED_100baseT_Full |
7070 SUPPORTED_1000baseT_Full |
7075 SUPPORTED_Asym_Pause);
7079 BNX2X_ERR("NVRAM config error. "
7080 "BAD SerDes ext_phy_config 0x%x\n",
7081 bp->link_params.ext_phy_config);
7085 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
7087 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7090 case SWITCH_CFG_10G:
7091 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
7094 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7095 switch (ext_phy_type) {
7096 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7097 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7100 bp->port.supported |= (SUPPORTED_10baseT_Half |
7101 SUPPORTED_10baseT_Full |
7102 SUPPORTED_100baseT_Half |
7103 SUPPORTED_100baseT_Full |
7104 SUPPORTED_1000baseT_Full |
7105 SUPPORTED_2500baseX_Full |
7106 SUPPORTED_10000baseT_Full |
7111 SUPPORTED_Asym_Pause);
7114 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7115 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
7118 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7121 SUPPORTED_Asym_Pause);
7124 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7125 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
7128 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7129 SUPPORTED_1000baseT_Full |
7132 SUPPORTED_Asym_Pause);
7135 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7136 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
7139 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7140 SUPPORTED_1000baseT_Full |
7144 SUPPORTED_Asym_Pause);
7147 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7148 BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
7151 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7152 SUPPORTED_2500baseX_Full |
7153 SUPPORTED_1000baseT_Full |
7157 SUPPORTED_Asym_Pause);
7160 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7161 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
7164 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7168 SUPPORTED_Asym_Pause);
7171 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7172 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7173 bp->link_params.ext_phy_config);
7177 BNX2X_ERR("NVRAM config error. "
7178 "BAD XGXS ext_phy_config 0x%x\n",
7179 bp->link_params.ext_phy_config);
7183 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
7185 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7190 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
7191 bp->port.link_config);
7194 bp->link_params.phy_addr = bp->port.phy_addr;
7196 /* mask what we support according to speed_cap_mask */
7197 if (!(bp->link_params.speed_cap_mask &
7198 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
7199 bp->port.supported &= ~SUPPORTED_10baseT_Half;
7201 if (!(bp->link_params.speed_cap_mask &
7202 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
7203 bp->port.supported &= ~SUPPORTED_10baseT_Full;
7205 if (!(bp->link_params.speed_cap_mask &
7206 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
7207 bp->port.supported &= ~SUPPORTED_100baseT_Half;
7209 if (!(bp->link_params.speed_cap_mask &
7210 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
7211 bp->port.supported &= ~SUPPORTED_100baseT_Full;
7213 if (!(bp->link_params.speed_cap_mask &
7214 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
7215 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
7216 SUPPORTED_1000baseT_Full);
7218 if (!(bp->link_params.speed_cap_mask &
7219 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
7220 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
7222 if (!(bp->link_params.speed_cap_mask &
7223 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
7224 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
7226 BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
7229 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
7231 bp->link_params.req_duplex = DUPLEX_FULL;
7233 switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
7234 case PORT_FEATURE_LINK_SPEED_AUTO:
7235 if (bp->port.supported & SUPPORTED_Autoneg) {
7236 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7237 bp->port.advertising = bp->port.supported;
7240 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7242 if ((ext_phy_type ==
7243 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
7245 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
7246 /* force 10G, no AN */
7247 bp->link_params.req_line_speed = SPEED_10000;
7248 bp->port.advertising =
7249 (ADVERTISED_10000baseT_Full |
7253 BNX2X_ERR("NVRAM config error. "
7254 "Invalid link_config 0x%x"
7255 " Autoneg not supported\n",
7256 bp->port.link_config);
7261 case PORT_FEATURE_LINK_SPEED_10M_FULL:
7262 if (bp->port.supported & SUPPORTED_10baseT_Full) {
7263 bp->link_params.req_line_speed = SPEED_10;
7264 bp->port.advertising = (ADVERTISED_10baseT_Full |
7267 BNX2X_ERR("NVRAM config error. "
7268 "Invalid link_config 0x%x"
7269 " speed_cap_mask 0x%x\n",
7270 bp->port.link_config,
7271 bp->link_params.speed_cap_mask);
7276 case PORT_FEATURE_LINK_SPEED_10M_HALF:
7277 if (bp->port.supported & SUPPORTED_10baseT_Half) {
7278 bp->link_params.req_line_speed = SPEED_10;
7279 bp->link_params.req_duplex = DUPLEX_HALF;
7280 bp->port.advertising = (ADVERTISED_10baseT_Half |
7283 BNX2X_ERR("NVRAM config error. "
7284 "Invalid link_config 0x%x"
7285 " speed_cap_mask 0x%x\n",
7286 bp->port.link_config,
7287 bp->link_params.speed_cap_mask);
7292 case PORT_FEATURE_LINK_SPEED_100M_FULL:
7293 if (bp->port.supported & SUPPORTED_100baseT_Full) {
7294 bp->link_params.req_line_speed = SPEED_100;
7295 bp->port.advertising = (ADVERTISED_100baseT_Full |
7298 BNX2X_ERR("NVRAM config error. "
7299 "Invalid link_config 0x%x"
7300 " speed_cap_mask 0x%x\n",
7301 bp->port.link_config,
7302 bp->link_params.speed_cap_mask);
7307 case PORT_FEATURE_LINK_SPEED_100M_HALF:
7308 if (bp->port.supported & SUPPORTED_100baseT_Half) {
7309 bp->link_params.req_line_speed = SPEED_100;
7310 bp->link_params.req_duplex = DUPLEX_HALF;
7311 bp->port.advertising = (ADVERTISED_100baseT_Half |
7314 BNX2X_ERR("NVRAM config error. "
7315 "Invalid link_config 0x%x"
7316 " speed_cap_mask 0x%x\n",
7317 bp->port.link_config,
7318 bp->link_params.speed_cap_mask);
7323 case PORT_FEATURE_LINK_SPEED_1G:
7324 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
7325 bp->link_params.req_line_speed = SPEED_1000;
7326 bp->port.advertising = (ADVERTISED_1000baseT_Full |
7329 BNX2X_ERR("NVRAM config error. "
7330 "Invalid link_config 0x%x"
7331 " speed_cap_mask 0x%x\n",
7332 bp->port.link_config,
7333 bp->link_params.speed_cap_mask);
7338 case PORT_FEATURE_LINK_SPEED_2_5G:
7339 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
7340 bp->link_params.req_line_speed = SPEED_2500;
7341 bp->port.advertising = (ADVERTISED_2500baseX_Full |
7344 BNX2X_ERR("NVRAM config error. "
7345 "Invalid link_config 0x%x"
7346 " speed_cap_mask 0x%x\n",
7347 bp->port.link_config,
7348 bp->link_params.speed_cap_mask);
7353 case PORT_FEATURE_LINK_SPEED_10G_CX4:
7354 case PORT_FEATURE_LINK_SPEED_10G_KX4:
7355 case PORT_FEATURE_LINK_SPEED_10G_KR:
7356 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
7357 bp->link_params.req_line_speed = SPEED_10000;
7358 bp->port.advertising = (ADVERTISED_10000baseT_Full |
7361 BNX2X_ERR("NVRAM config error. "
7362 "Invalid link_config 0x%x"
7363 " speed_cap_mask 0x%x\n",
7364 bp->port.link_config,
7365 bp->link_params.speed_cap_mask);
7371 BNX2X_ERR("NVRAM config error. "
7372 "BAD link speed link_config 0x%x\n",
7373 bp->port.link_config);
7374 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7375 bp->port.advertising = bp->port.supported;
7379 bp->link_params.req_flow_ctrl = (bp->port.link_config &
7380 PORT_FEATURE_FLOW_CONTROL_MASK);
7381 if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
7382 !(bp->port.supported & SUPPORTED_Autoneg))
7383 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
7385 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
7386 " advertising 0x%x\n",
7387 bp->link_params.req_line_speed,
7388 bp->link_params.req_duplex,
7389 bp->link_params.req_flow_ctrl, bp->port.advertising);
7392 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
7394 int port = BP_PORT(bp);
7397 bp->link_params.bp = bp;
7398 bp->link_params.port = port;
7400 bp->link_params.serdes_config =
7401 SHMEM_RD(bp, dev_info.port_hw_config[port].serdes_config);
7402 bp->link_params.lane_config =
7403 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
7404 bp->link_params.ext_phy_config =
7406 dev_info.port_hw_config[port].external_phy_config);
7407 bp->link_params.speed_cap_mask =
7409 dev_info.port_hw_config[port].speed_capability_mask);
7411 bp->port.link_config =
7412 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
7414 BNX2X_DEV_INFO("serdes_config 0x%08x lane_config 0x%08x\n"
7415 KERN_INFO " ext_phy_config 0x%08x speed_cap_mask 0x%08x"
7416 " link_config 0x%08x\n",
7417 bp->link_params.serdes_config,
7418 bp->link_params.lane_config,
7419 bp->link_params.ext_phy_config,
7420 bp->link_params.speed_cap_mask, bp->port.link_config);
7422 bp->link_params.switch_cfg = (bp->port.link_config &
7423 PORT_FEATURE_CONNECTED_SWITCH_MASK);
7424 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
7426 bnx2x_link_settings_requested(bp);
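/* read the port MAC: mac_upper holds bytes 0-1 in its low 16 bits
 * and mac_lower holds bytes 2-5, e.g. upper 0x0011 with lower
 * 0x22334455 gives 00:11:22:33:44:55
 */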
7428 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
7429 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
7430 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7431 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7432 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7433 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7434 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
7435 bp->dev->dev_addr[5] = (u8)(val & 0xff);
7436 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
7437 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
7440 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
7442 int func = BP_FUNC(bp);
7446 bnx2x_get_common_hwinfo(bp);
7450 if (CHIP_IS_E1H(bp)) {
7452 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
7454 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
7455 FUNC_MF_CFG_E1HOV_TAG_MASK);
7456 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
7460 BNX2X_DEV_INFO("MF mode E1HOV for func %d is %d "
7462 func, bp->e1hov, bp->e1hov);
7464 BNX2X_DEV_INFO("Single function mode\n");
7466 BNX2X_ERR("!!! No valid E1HOV for func %d,"
7467 " aborting\n", func);
7473 if (!BP_NOMCP(bp)) {
7474 bnx2x_get_port_hwinfo(bp);
7476 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
7477 DRV_MSG_SEQ_NUMBER_MASK);
7478 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
7482 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
7483 val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
7484 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
7485 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
7486 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7487 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7488 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7489 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7490 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
7491 bp->dev->dev_addr[5] = (u8)(val & 0xff);
7492 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
7494 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
7502 /* only supposed to happen on emulation/FPGA */
7503 BNX2X_ERR("warning random MAC workaround active\n");
7504 random_ether_addr(bp->dev->dev_addr);
7505 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
7511 static int __devinit bnx2x_init_bp(struct bnx2x *bp)
7513 int func = BP_FUNC(bp);
7516 /* Disable interrupt handling until HW is initialized */
7517 atomic_set(&bp->intr_sem, 1);
7519 mutex_init(&bp->port.phy_mutex);
7521 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
7522 INIT_WORK(&bp->reset_task, bnx2x_reset_task);
7524 rc = bnx2x_get_hwinfo(bp);
7526 /* need to reset chip if undi was active */
7528 bnx2x_undi_unload(bp);
7530 if (CHIP_REV_IS_FPGA(bp))
7531 printk(KERN_ERR PFX "FPGA detected\n");
if (BP_NOMCP(bp) && (func == 0))
	printk(KERN_ERR PFX
	       "MCP disabled, must load devices in order!\n");
/* Set TPA flags */
if (disable_tpa) {
	bp->flags &= ~TPA_ENABLE_FLAG;
	bp->dev->features &= ~NETIF_F_LRO;
} else {
	bp->flags |= TPA_ENABLE_FLAG;
	bp->dev->features |= NETIF_F_LRO;
}
7547 bp->tx_ring_size = MAX_TX_AVAIL;
7548 bp->rx_ring_size = MAX_RX_AVAIL;
7556 bp->timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
7557 bp->current_interval = (poll ? poll : bp->timer_interval);
7559 init_timer(&bp->timer);
7560 bp->timer.expires = jiffies + bp->current_interval;
7561 bp->timer.data = (unsigned long) bp;
7562 bp->timer.function = bnx2x_timer;
/* ethtool service functions */
7571 /* All ethtool functions called with rtnl_lock */
7573 static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7575 struct bnx2x *bp = netdev_priv(dev);
7577 cmd->supported = bp->port.supported;
7578 cmd->advertising = bp->port.advertising;
7580 if (netif_carrier_ok(dev)) {
7581 cmd->speed = bp->link_vars.line_speed;
7582 cmd->duplex = bp->link_vars.duplex;
7584 cmd->speed = bp->link_params.req_line_speed;
7585 cmd->duplex = bp->link_params.req_duplex;
7590 vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
7591 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
7592 if (vn_max_rate < cmd->speed)
7593 cmd->speed = vn_max_rate;
7596 if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
7598 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7600 switch (ext_phy_type) {
7601 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7602 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7603 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7604 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7605 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7606 cmd->port = PORT_FIBRE;
7609 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7610 cmd->port = PORT_TP;
7613 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7614 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7615 bp->link_params.ext_phy_config);
7619 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
7620 bp->link_params.ext_phy_config);
7624 cmd->port = PORT_TP;
7626 cmd->phy_address = bp->port.phy_addr;
7627 cmd->transceiver = XCVR_INTERNAL;
7629 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
7630 cmd->autoneg = AUTONEG_ENABLE;
7632 cmd->autoneg = AUTONEG_DISABLE;
7637 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7638 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
7639 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
7640 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
7641 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7642 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7643 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7648 static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7650 struct bnx2x *bp = netdev_priv(dev);
7656 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7657 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
7658 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
7659 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
7660 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7661 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7662 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7664 if (cmd->autoneg == AUTONEG_ENABLE) {
7665 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
7666 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
7670 /* advertise the requested speed and duplex if supported */
7671 cmd->advertising &= bp->port.supported;
7673 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7674 bp->link_params.req_duplex = DUPLEX_FULL;
7675 bp->port.advertising |= (ADVERTISED_Autoneg |
7678 } else { /* forced speed */
7679 /* advertise the requested speed and duplex if supported */
7680 switch (cmd->speed) {
7682 if (cmd->duplex == DUPLEX_FULL) {
7683 if (!(bp->port.supported &
7684 SUPPORTED_10baseT_Full)) {
7686 "10M full not supported\n");
7690 advertising = (ADVERTISED_10baseT_Full |
7693 if (!(bp->port.supported &
7694 SUPPORTED_10baseT_Half)) {
7696 "10M half not supported\n");
7700 advertising = (ADVERTISED_10baseT_Half |
7706 if (cmd->duplex == DUPLEX_FULL) {
7707 if (!(bp->port.supported &
7708 SUPPORTED_100baseT_Full)) {
7710 "100M full not supported\n");
7714 advertising = (ADVERTISED_100baseT_Full |
7717 if (!(bp->port.supported &
7718 SUPPORTED_100baseT_Half)) {
7720 "100M half not supported\n");
7724 advertising = (ADVERTISED_100baseT_Half |
7730 if (cmd->duplex != DUPLEX_FULL) {
7731 DP(NETIF_MSG_LINK, "1G half not supported\n");
7735 if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
7736 DP(NETIF_MSG_LINK, "1G full not supported\n");
7740 advertising = (ADVERTISED_1000baseT_Full |
7745 if (cmd->duplex != DUPLEX_FULL) {
7747 "2.5G half not supported\n");
7751 if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
7753 "2.5G full not supported\n");
7757 advertising = (ADVERTISED_2500baseX_Full |
7762 if (cmd->duplex != DUPLEX_FULL) {
7763 DP(NETIF_MSG_LINK, "10G half not supported\n");
7767 if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
7768 DP(NETIF_MSG_LINK, "10G full not supported\n");
7772 advertising = (ADVERTISED_10000baseT_Full |
7777 DP(NETIF_MSG_LINK, "Unsupported speed\n");
7781 bp->link_params.req_line_speed = cmd->speed;
7782 bp->link_params.req_duplex = cmd->duplex;
7783 bp->port.advertising = advertising;
7786 DP(NETIF_MSG_LINK, "req_line_speed %d\n"
7787 DP_LEVEL " req_duplex %d advertising 0x%x\n",
7788 bp->link_params.req_line_speed, bp->link_params.req_duplex,
7789 bp->port.advertising);
7791 if (netif_running(dev)) {
7792 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7799 #define PHY_FW_VER_LEN 10
7801 static void bnx2x_get_drvinfo(struct net_device *dev,
7802 struct ethtool_drvinfo *info)
7804 struct bnx2x *bp = netdev_priv(dev);
7805 u8 phy_fw_ver[PHY_FW_VER_LEN];
7807 strcpy(info->driver, DRV_MODULE_NAME);
7808 strcpy(info->version, DRV_MODULE_VERSION);
7810 phy_fw_ver[0] = '\0';
7812 bnx2x_acquire_phy_lock(bp);
7813 bnx2x_get_ext_phy_fw_version(&bp->link_params,
7814 (bp->state != BNX2X_STATE_CLOSED),
7815 phy_fw_ver, PHY_FW_VER_LEN);
7816 bnx2x_release_phy_lock(bp);
7819 snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
7820 (bp->common.bc_ver & 0xff0000) >> 16,
7821 (bp->common.bc_ver & 0xff00) >> 8,
7822 (bp->common.bc_ver & 0xff),
7823 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
7824 strcpy(info->bus_info, pci_name(bp->pdev));
7825 info->n_stats = BNX2X_NUM_STATS;
7826 info->testinfo_len = BNX2X_NUM_TESTS;
7827 info->eedump_len = bp->common.flash_size;
7828 info->regdump_len = 0;
7831 static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7833 struct bnx2x *bp = netdev_priv(dev);
7835 if (bp->flags & NO_WOL_FLAG) {
7839 wol->supported = WAKE_MAGIC;
7841 wol->wolopts = WAKE_MAGIC;
7845 memset(&wol->sopass, 0, sizeof(wol->sopass));
7848 static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7850 struct bnx2x *bp = netdev_priv(dev);
7852 if (wol->wolopts & ~WAKE_MAGIC)
7855 if (wol->wolopts & WAKE_MAGIC) {
7856 if (bp->flags & NO_WOL_FLAG)
7866 static u32 bnx2x_get_msglevel(struct net_device *dev)
7868 struct bnx2x *bp = netdev_priv(dev);
7870 return bp->msglevel;
7873 static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
7875 struct bnx2x *bp = netdev_priv(dev);
7877 if (capable(CAP_NET_ADMIN))
7878 bp->msglevel = level;
7881 static int bnx2x_nway_reset(struct net_device *dev)
7883 struct bnx2x *bp = netdev_priv(dev);
7888 if (netif_running(dev)) {
7889 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7896 static int bnx2x_get_eeprom_len(struct net_device *dev)
7898 struct bnx2x *bp = netdev_priv(dev);
7900 return bp->common.flash_size;
7903 static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
7905 int port = BP_PORT(bp);
7909 /* adjust timeout for emulation/FPGA */
7910 count = NVRAM_TIMEOUT_COUNT;
7911 if (CHIP_REV_IS_SLOW(bp))
7914 /* request access to nvram interface */
7915 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
7916 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
7918 for (i = 0; i < count*10; i++) {
7919 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
7920 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
7926 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
7927 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
7934 static int bnx2x_release_nvram_lock(struct bnx2x *bp)
7936 int port = BP_PORT(bp);
7940 /* adjust timeout for emulation/FPGA */
7941 count = NVRAM_TIMEOUT_COUNT;
7942 if (CHIP_REV_IS_SLOW(bp))
7945 /* relinquish nvram interface */
7946 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
7947 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
7949 for (i = 0; i < count*10; i++) {
7950 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
7951 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
7957 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
7958 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
7965 static void bnx2x_enable_nvram_access(struct bnx2x *bp)
7969 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
7971 /* enable both bits, even on read */
7972 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
7973 (val | MCPR_NVM_ACCESS_ENABLE_EN |
7974 MCPR_NVM_ACCESS_ENABLE_WR_EN));
7977 static void bnx2x_disable_nvram_access(struct bnx2x *bp)
7981 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
7983 /* disable both bits, even after read */
7984 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
7985 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
7986 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
7989 static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, u32 *ret_val,
7995 /* build the command word */
7996 cmd_flags |= MCPR_NVM_COMMAND_DOIT;
7998 /* need to clear DONE bit separately */
7999 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8001 /* address of the NVRAM to read from */
8002 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8003 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8005 /* issue a read command */
8006 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8008 /* adjust timeout for emulation/FPGA */
8009 count = NVRAM_TIMEOUT_COUNT;
8010 if (CHIP_REV_IS_SLOW(bp))
8013 /* wait for completion */
8016 for (i = 0; i < count; i++) {
8018 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8020 if (val & MCPR_NVM_COMMAND_DONE) {
8021 val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
/* we read nvram data in cpu order
 * but ethtool sees it as an array of bytes;
 * converting to big-endian does the job
 */
8025 val = cpu_to_be32(val);
8035 static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
8042 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8044 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
8049 if (offset + buf_size > bp->common.flash_size) {
8050 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8051 " buf_size (0x%x) > flash_size (0x%x)\n",
8052 offset, buf_size, bp->common.flash_size);
8056 /* request access to nvram interface */
8057 rc = bnx2x_acquire_nvram_lock(bp);
8061 /* enable access to nvram interface */
8062 bnx2x_enable_nvram_access(bp);
8064 /* read the first word(s) */
8065 cmd_flags = MCPR_NVM_COMMAND_FIRST;
8066 while ((buf_size > sizeof(u32)) && (rc == 0)) {
8067 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8068 memcpy(ret_buf, &val, 4);
8070 /* advance to the next dword */
8071 offset += sizeof(u32);
8072 ret_buf += sizeof(u32);
8073 buf_size -= sizeof(u32);
8078 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8079 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8080 memcpy(ret_buf, &val, 4);
8083 /* disable access to nvram interface */
8084 bnx2x_disable_nvram_access(bp);
8085 bnx2x_release_nvram_lock(bp);
8090 static int bnx2x_get_eeprom(struct net_device *dev,
8091 struct ethtool_eeprom *eeprom, u8 *eebuf)
8093 struct bnx2x *bp = netdev_priv(dev);
8096 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8097 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8098 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8099 eeprom->len, eeprom->len);
8101 /* parameters already validated in ethtool_get_eeprom */
8103 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
8108 static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
8113 /* build the command word */
8114 cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
8116 /* need to clear DONE bit separately */
8117 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8119 /* write the data */
8120 REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
8122 /* address of the NVRAM to write to */
8123 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8124 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8126 /* issue the write command */
8127 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8129 /* adjust timeout for emulation/FPGA */
8130 count = NVRAM_TIMEOUT_COUNT;
8131 if (CHIP_REV_IS_SLOW(bp))
8134 /* wait for completion */
8136 for (i = 0; i < count; i++) {
8138 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8139 if (val & MCPR_NVM_COMMAND_DONE) {
8148 #define BYTE_OFFSET(offset) (8 * (offset & 0x03))
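/* bit position of a byte within its aligned dword,
 * e.g. offset 6 -> byte 2 of the dword -> bit 16
 */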
8150 static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
8158 if (offset + buf_size > bp->common.flash_size) {
8159 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8160 " buf_size (0x%x) > flash_size (0x%x)\n",
8161 offset, buf_size, bp->common.flash_size);
8165 /* request access to nvram interface */
8166 rc = bnx2x_acquire_nvram_lock(bp);
8170 /* enable access to nvram interface */
8171 bnx2x_enable_nvram_access(bp);
8173 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
8174 align_offset = (offset & ~0x03);
8175 rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
8178 val &= ~(0xff << BYTE_OFFSET(offset));
8179 val |= (*data_buf << BYTE_OFFSET(offset));
/* nvram data is returned as an array of bytes;
 * convert it back to cpu order
 */
8183 val = be32_to_cpu(val);
8185 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
8189 /* disable access to nvram interface */
8190 bnx2x_disable_nvram_access(bp);
8191 bnx2x_release_nvram_lock(bp);
8196 static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
8204 if (buf_size == 1) /* ethtool */
8205 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
8207 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8209 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
8214 if (offset + buf_size > bp->common.flash_size) {
8215 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8216 " buf_size (0x%x) > flash_size (0x%x)\n",
8217 offset, buf_size, bp->common.flash_size);
8221 /* request access to nvram interface */
8222 rc = bnx2x_acquire_nvram_lock(bp);
8226 /* enable access to nvram interface */
8227 bnx2x_enable_nvram_access(bp);
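/* write one dword at a time; FIRST/LAST bracket each NVRAM page:
 * LAST is set on the final dword of the buffer or of a page,
 * FIRST again on the first dword of the next page
 */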
8230 cmd_flags = MCPR_NVM_COMMAND_FIRST;
8231 while ((written_so_far < buf_size) && (rc == 0)) {
8232 if (written_so_far == (buf_size - sizeof(u32)))
8233 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8234 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
8235 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8236 else if ((offset % NVRAM_PAGE_SIZE) == 0)
8237 cmd_flags |= MCPR_NVM_COMMAND_FIRST;
8239 memcpy(&val, data_buf, 4);
8241 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
8243 /* advance to the next dword */
8244 offset += sizeof(u32);
8245 data_buf += sizeof(u32);
8246 written_so_far += sizeof(u32);
8250 /* disable access to nvram interface */
8251 bnx2x_disable_nvram_access(bp);
8252 bnx2x_release_nvram_lock(bp);
8257 static int bnx2x_set_eeprom(struct net_device *dev,
8258 struct ethtool_eeprom *eeprom, u8 *eebuf)
8260 struct bnx2x *bp = netdev_priv(dev);
8263 if (!netif_running(dev))
8266 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8267 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8268 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8269 eeprom->len, eeprom->len);
8271 /* parameters already validated in ethtool_set_eeprom */
8273 /* If the magic number is PHY (0x00504859) upgrade the PHY FW */
8274 if (eeprom->magic == 0x00504859)
8277 bnx2x_acquire_phy_lock(bp);
8278 rc = bnx2x_flash_download(bp, BP_PORT(bp),
8279 bp->link_params.ext_phy_config,
8280 (bp->state != BNX2X_STATE_CLOSED),
8281 eebuf, eeprom->len);
8282 if ((bp->state == BNX2X_STATE_OPEN) ||
8283 (bp->state == BNX2X_STATE_DISABLED)) {
8284 rc |= bnx2x_link_reset(&bp->link_params,
8286 rc |= bnx2x_phy_init(&bp->link_params,
8289 bnx2x_release_phy_lock(bp);
8291 } else /* Only the PMF can access the PHY */
8294 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
8299 static int bnx2x_get_coalesce(struct net_device *dev,
8300 struct ethtool_coalesce *coal)
8302 struct bnx2x *bp = netdev_priv(dev);
8304 memset(coal, 0, sizeof(struct ethtool_coalesce));
8306 coal->rx_coalesce_usecs = bp->rx_ticks;
8307 coal->tx_coalesce_usecs = bp->tx_ticks;
8312 static int bnx2x_set_coalesce(struct net_device *dev,
8313 struct ethtool_coalesce *coal)
8315 struct bnx2x *bp = netdev_priv(dev);
8317 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
8318 if (bp->rx_ticks > 3000)
8319 bp->rx_ticks = 3000;
8321 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
8322 if (bp->tx_ticks > 0x3000)
8323 bp->tx_ticks = 0x3000;
8325 if (netif_running(dev))
8326 bnx2x_update_coalesce(bp);
8331 static void bnx2x_get_ringparam(struct net_device *dev,
8332 struct ethtool_ringparam *ering)
8334 struct bnx2x *bp = netdev_priv(dev);
8336 ering->rx_max_pending = MAX_RX_AVAIL;
8337 ering->rx_mini_max_pending = 0;
8338 ering->rx_jumbo_max_pending = 0;
8340 ering->rx_pending = bp->rx_ring_size;
8341 ering->rx_mini_pending = 0;
8342 ering->rx_jumbo_pending = 0;
8344 ering->tx_max_pending = MAX_TX_AVAIL;
8345 ering->tx_pending = bp->tx_ring_size;
8348 static int bnx2x_set_ringparam(struct net_device *dev,
8349 struct ethtool_ringparam *ering)
8351 struct bnx2x *bp = netdev_priv(dev);
8354 if ((ering->rx_pending > MAX_RX_AVAIL) ||
8355 (ering->tx_pending > MAX_TX_AVAIL) ||
8356 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
8359 bp->rx_ring_size = ering->rx_pending;
8360 bp->tx_ring_size = ering->tx_pending;
8362 if (netif_running(dev)) {
8363 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8364 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
8370 static void bnx2x_get_pauseparam(struct net_device *dev,
8371 struct ethtool_pauseparam *epause)
8373 struct bnx2x *bp = netdev_priv(dev);
8375 epause->autoneg = (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
8376 (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
8378 epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
8379 BNX2X_FLOW_CTRL_RX);
8380 epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
8381 BNX2X_FLOW_CTRL_TX);
8383 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8384 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
8385 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8388 static int bnx2x_set_pauseparam(struct net_device *dev,
8389 struct ethtool_pauseparam *epause)
8391 struct bnx2x *bp = netdev_priv(dev);
8396 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8397 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
8398 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8400 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
8402 if (epause->rx_pause)
8403 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
8405 if (epause->tx_pause)
8406 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
8408 if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
8409 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
8411 if (epause->autoneg) {
8412 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
8413 DP(NETIF_MSG_LINK, "autoneg not supported\n");
8417 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
8418 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
8422 "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
8424 if (netif_running(dev)) {
8425 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8432 static int bnx2x_set_flags(struct net_device *dev, u32 data)
8434 struct bnx2x *bp = netdev_priv(dev);
8438 /* TPA requires Rx CSUM offloading */
8439 if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
8440 if (!(dev->features & NETIF_F_LRO)) {
8441 dev->features |= NETIF_F_LRO;
8442 bp->flags |= TPA_ENABLE_FLAG;
8446 } else if (dev->features & NETIF_F_LRO) {
8447 dev->features &= ~NETIF_F_LRO;
8448 bp->flags &= ~TPA_ENABLE_FLAG;
8452 if (changed && netif_running(dev)) {
8453 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8454 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
8460 static u32 bnx2x_get_rx_csum(struct net_device *dev)
8462 struct bnx2x *bp = netdev_priv(dev);
8467 static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
8469 struct bnx2x *bp = netdev_priv(dev);
/* Disable TPA when Rx CSUM is disabled; otherwise all
 * TPA'ed packets will be discarded due to a wrong TCP CSUM
 */
8477 u32 flags = ethtool_op_get_flags(dev);
8479 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
8485 static int bnx2x_set_tso(struct net_device *dev, u32 data)
8488 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
8489 dev->features |= NETIF_F_TSO6;
8491 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
8492 dev->features &= ~NETIF_F_TSO6;
8498 static const struct {
8499 char string[ETH_GSTRING_LEN];
8500 } bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
8501 { "register_test (offline)" },
8502 { "memory_test (offline)" },
8503 { "loopback_test (offline)" },
8504 { "nvram_test (online)" },
8505 { "interrupt_test (online)" },
8506 { "link_test (online)" },
8507 { "idle check (online)" },
8508 { "MC errors (online)" }
8511 static int bnx2x_self_test_count(struct net_device *dev)
8513 return BNX2X_NUM_TESTS;
8516 static int bnx2x_test_registers(struct bnx2x *bp)
8518 int idx, i, rc = -ENODEV;
8520 int port = BP_PORT(bp);
static const struct {
	u32 offset0;
	u32 offset1;	/* per-port stride, see the offset computation below */
	u32 mask;
} reg_tbl[] = {
/* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
8527 { DORQ_REG_DB_ADDR0, 4, 0xffffffff },
8528 { HC_REG_AGG_INT_0, 4, 0x000003ff },
8529 { PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
8530 { PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
8531 { PRS_REG_CID_PORT_0, 4, 0x00ffffff },
8532 { PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
8533 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
8534 { PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
8535 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
8536 /* 10 */ { PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
8537 { QM_REG_CONNNUM_0, 4, 0x000fffff },
8538 { TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
8539 { SRC_REG_KEYRSS0_0, 40, 0xffffffff },
8540 { SRC_REG_KEYRSS0_7, 40, 0xffffffff },
8541 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
8542 { XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
8543 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
8544 { NIG_REG_EGRESS_MNG0_FIFO, 20, 0xffffffff },
8545 { NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
8546 /* 20 */ { NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
8547 { NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
8548 { NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
8549 { NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
8550 { NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
8551 { NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
8552 { NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
8553 { NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
8554 { NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
8555 { NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
8556 /* 30 */ { NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
8557 { NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
8558 { NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
8559 { NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
8560 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
8561 { NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
8562 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
8563 { NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },
8565 { 0xffffffff, 0, 0x00000000 }
8568 if (!netif_running(bp->dev))
8571 /* Repeat the test twice:
8572 First by writing 0x00000000, second by writing 0xffffffff */
8573 for (idx = 0; idx < 2; idx++) {
8580 wr_val = 0xffffffff;
8584 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
8585 u32 offset, mask, save_val, val;
8587 offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
8588 mask = reg_tbl[i].mask;
8590 save_val = REG_RD(bp, offset);
8592 REG_WR(bp, offset, wr_val);
8593 val = REG_RD(bp, offset);
8595 /* Restore the original register's value */
8596 REG_WR(bp, offset, save_val);
/* verify the value is as expected */
8599 if ((val & mask) != (wr_val & mask))
8610 static int bnx2x_test_memory(struct bnx2x *bp)
8612 int i, j, rc = -ENODEV;
static const struct {
	u32 offset;
	int size;
} mem_tbl[] = {
{ CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
8619 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
8620 { CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
8621 { DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
8622 { TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
8623 { UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
{ XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },

{ 0xffffffff, 0 }
};
static const struct {
	char *name;
	u32 offset;
	u32 e1_mask;
	u32 e1h_mask;
} prty_tbl[] = {
{ "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 },
8635 { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 },
8636 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 },
8637 { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 },
8638 { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 },
8639 { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 },
8641 { NULL, 0xffffffff, 0, 0 }
8644 if (!netif_running(bp->dev))
8647 /* Go through all the memories */
8648 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
8649 for (j = 0; j < mem_tbl[i].size; j++)
8650 REG_RD(bp, mem_tbl[i].offset + j*4);
8652 /* Check the parity status */
8653 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
8654 val = REG_RD(bp, prty_tbl[i].offset);
8655 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
8656 (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
8658 "%s is 0x%x\n", prty_tbl[i].name, val);
8669 static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
8674 while (bnx2x_link_test(bp) && cnt--)
8678 static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
8680 unsigned int pkt_size, num_pkts, i;
8681 struct sk_buff *skb;
8682 unsigned char *packet;
8683 struct bnx2x_fastpath *fp = &bp->fp[0];
8684 u16 tx_start_idx, tx_idx;
8685 u16 rx_start_idx, rx_idx;
8687 struct sw_tx_bd *tx_buf;
8688 struct eth_tx_bd *tx_bd;
8690 union eth_rx_cqe *cqe;
8692 struct sw_rx_bd *rx_buf;
8696 if (loopback_mode == BNX2X_MAC_LOOPBACK) {
8697 bp->link_params.loopback_mode = LOOPBACK_BMAC;
8698 bnx2x_acquire_phy_lock(bp);
8699 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
8700 bnx2x_release_phy_lock(bp);
8702 } else if (loopback_mode == BNX2X_PHY_LOOPBACK) {
8703 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
8704 bnx2x_acquire_phy_lock(bp);
8705 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
8706 bnx2x_release_phy_lock(bp);
8707 /* wait until link state is restored */
8708 bnx2x_wait_for_link(bp, link_up);
8714 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
8717 goto test_loopback_exit;
8719 packet = skb_put(skb, pkt_size);
8720 memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
8721 memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN));
8722 for (i = ETH_HLEN; i < pkt_size; i++)
8723 packet[i] = (unsigned char) (i & 0xff);
8726 tx_start_idx = le16_to_cpu(*fp->tx_cons_sb);
8727 rx_start_idx = le16_to_cpu(*fp->rx_cons_sb);
8729 pkt_prod = fp->tx_pkt_prod++;
8730 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
8731 tx_buf->first_bd = fp->tx_bd_prod;
8734 tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)];
8735 mapping = pci_map_single(bp->pdev, skb->data,
8736 skb_headlen(skb), PCI_DMA_TODEVICE);
8737 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
8738 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
8739 tx_bd->nbd = cpu_to_le16(1);
8740 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
8741 tx_bd->vlan = cpu_to_le16(pkt_prod);
8742 tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD |
8743 ETH_TX_BD_FLAGS_END_BD);
8744 tx_bd->general_data = ((UNICAST_ADDRESS <<
8745 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);
8749 fp->hw_tx_prods->bds_prod =
8750 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + 1);
8751 mb(); /* FW restriction: must not reorder writing nbd and packets */
8752 fp->hw_tx_prods->packets_prod =
8753 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
8754 DOORBELL(bp, FP_IDX(fp), 0);
8760 bp->dev->trans_start = jiffies;
8764 tx_idx = le16_to_cpu(*fp->tx_cons_sb);
8765 if (tx_idx != tx_start_idx + num_pkts)
8766 goto test_loopback_exit;
8768 rx_idx = le16_to_cpu(*fp->rx_cons_sb);
8769 if (rx_idx != rx_start_idx + num_pkts)
8770 goto test_loopback_exit;
8772 cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)];
8773 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
8774 if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
8775 goto test_loopback_rx_exit;
8777 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
8778 if (len != pkt_size)
8779 goto test_loopback_rx_exit;
8781 rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)];
8783 skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
8784 for (i = ETH_HLEN; i < pkt_size; i++)
8785 if (*(skb->data + i) != (unsigned char) (i & 0xff))
8786 goto test_loopback_rx_exit;
8790 test_loopback_rx_exit:
8792 fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
8793 fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
8794 fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons);
8795 fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod);
8797 /* Update producers */
8798 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
8802 bp->link_params.loopback_mode = LOOPBACK_NONE;
8807 static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
8811 if (!netif_running(bp->dev))
8812 return BNX2X_LOOPBACK_FAILED;
8814 bnx2x_netif_stop(bp, 1);
8816 if (bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up)) {
8817 DP(NETIF_MSG_PROBE, "MAC loopback failed\n");
8818 rc |= BNX2X_MAC_LOOPBACK_FAILED;
8821 if (bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up)) {
8822 DP(NETIF_MSG_PROBE, "PHY loopback failed\n");
8823 rc |= BNX2X_PHY_LOOPBACK_FAILED;
8826 bnx2x_netif_start(bp);
8831 #define CRC32_RESIDUAL 0xdebb20e3
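/* standard CRC-32 residual (reflected form): running the little-endian
 * CRC over a block that already ends with its own CRC always yields
 * this constant, so each nvram region can be validated in one pass.
 */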
8833 static int bnx2x_test_nvram(struct bnx2x *bp)
8835 static const struct {
8839 { 0, 0x14 }, /* bootstrap */
8840 { 0x14, 0xec }, /* dir */
8841 { 0x100, 0x350 }, /* manuf_info */
8842 { 0x450, 0xf0 }, /* feature_info */
8843 { 0x640, 0x64 }, /* upgrade_key_info */
8845 { 0x708, 0x70 }, /* manuf_key_info */
8850 u8 *data = (u8 *)buf;
8854 rc = bnx2x_nvram_read(bp, 0, data, 4);
8856 DP(NETIF_MSG_PROBE, "magic value read (rc -%d)\n", -rc);
8857 goto test_nvram_exit;
8860 magic = be32_to_cpu(buf[0]);
8861 if (magic != 0x669955aa) {
8862 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
8864 goto test_nvram_exit;
8867 for (i = 0; nvram_tbl[i].size; i++) {
8869 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
8873 "nvram_tbl[%d] read data (rc -%d)\n", i, -rc);
8874 goto test_nvram_exit;
8877 csum = ether_crc_le(nvram_tbl[i].size, data);
8878 if (csum != CRC32_RESIDUAL) {
8880 "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
8882 goto test_nvram_exit;
8890 static int bnx2x_test_intr(struct bnx2x *bp)
8892 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
8895 if (!netif_running(bp->dev))
8898 config->hdr.length_6b = 0;
8899 config->hdr.offset = 0;
8900 config->hdr.client_id = BP_CL_ID(bp);
8901 config->hdr.reserved1 = 0;
8903 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
8904 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
8905 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
8907 bp->set_mac_pending++;
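/* the SET_MAC ramrod completion arrives over the slowpath interrupt
 * and clears set_mac_pending, so the flag dropping to zero inside the
 * poll below shows that interrupts are actually being delivered */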
8908 for (i = 0; i < 10; i++) {
8909 if (!bp->set_mac_pending)
8911 msleep_interruptible(10);
8920 static void bnx2x_self_test(struct net_device *dev,
8921 struct ethtool_test *etest, u64 *buf)
8923 struct bnx2x *bp = netdev_priv(dev);
8925 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
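/* buf holds one u64 result per entry of bnx2x_tests_str_arr (see
 * bnx2x_get_strings); a non-zero slot reports that test as failed */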
8927 if (!netif_running(dev))
8930 /* offline tests are not supported in MF mode */
8932 etest->flags &= ~ETH_TEST_FL_OFFLINE;
8934 if (etest->flags & ETH_TEST_FL_OFFLINE) {
8937 link_up = bp->link_vars.link_up;
8938 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8939 bnx2x_nic_load(bp, LOAD_DIAG);
8940 /* wait until link state is restored */
8941 bnx2x_wait_for_link(bp, link_up);
8943 if (bnx2x_test_registers(bp) != 0) {
8945 etest->flags |= ETH_TEST_FL_FAILED;
8947 if (bnx2x_test_memory(bp) != 0) {
8949 etest->flags |= ETH_TEST_FL_FAILED;
8951 buf[2] = bnx2x_test_loopback(bp, link_up);
8953 etest->flags |= ETH_TEST_FL_FAILED;
8955 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8956 bnx2x_nic_load(bp, LOAD_NORMAL);
8957 /* wait until link state is restored */
8958 bnx2x_wait_for_link(bp, link_up);
8960 if (bnx2x_test_nvram(bp) != 0) {
8962 etest->flags |= ETH_TEST_FL_FAILED;
8964 if (bnx2x_test_intr(bp) != 0) {
8966 etest->flags |= ETH_TEST_FL_FAILED;
8969 if (bnx2x_link_test(bp) != 0) {
8971 etest->flags |= ETH_TEST_FL_FAILED;
8973 buf[7] = bnx2x_mc_assert(bp);
8975 etest->flags |= ETH_TEST_FL_FAILED;
8977 #ifdef BNX2X_EXTRA_DEBUG
8978 bnx2x_panic_dump(bp);
8982 static const struct {
8986 #define STATS_FLAGS_PORT 1
8987 #define STATS_FLAGS_FUNC 2
8988 u8 string[ETH_GSTRING_LEN];
8989 } bnx2x_stats_arr[BNX2X_NUM_STATS] = {
8990 /* 1 */ { STATS_OFFSET32(valid_bytes_received_hi),
8991 8, STATS_FLAGS_FUNC, "rx_bytes" },
8992 { STATS_OFFSET32(error_bytes_received_hi),
8993 8, STATS_FLAGS_FUNC, "rx_error_bytes" },
8994 { STATS_OFFSET32(total_bytes_transmitted_hi),
8995 8, STATS_FLAGS_FUNC, "tx_bytes" },
8996 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
8997 8, STATS_FLAGS_PORT, "tx_error_bytes" },
8998 { STATS_OFFSET32(total_unicast_packets_received_hi),
8999 8, STATS_FLAGS_FUNC, "rx_ucast_packets" },
9000 { STATS_OFFSET32(total_multicast_packets_received_hi),
9001 8, STATS_FLAGS_FUNC, "rx_mcast_packets" },
9002 { STATS_OFFSET32(total_broadcast_packets_received_hi),
9003 8, STATS_FLAGS_FUNC, "rx_bcast_packets" },
9004 { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
9005 8, STATS_FLAGS_FUNC, "tx_packets" },
9006 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
9007 8, STATS_FLAGS_PORT, "tx_mac_errors" },
9008 /* 10 */{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
9009 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
9010 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
9011 8, STATS_FLAGS_PORT, "rx_crc_errors" },
9012 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
9013 8, STATS_FLAGS_PORT, "rx_align_errors" },
9014 { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
9015 8, STATS_FLAGS_PORT, "tx_single_collisions" },
9016 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
9017 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
9018 { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
9019 8, STATS_FLAGS_PORT, "tx_deferred" },
9020 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
9021 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
9022 { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
9023 8, STATS_FLAGS_PORT, "tx_late_collisions" },
9024 { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
9025 8, STATS_FLAGS_PORT, "tx_total_collisions" },
9026 { STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
9027 8, STATS_FLAGS_PORT, "rx_fragments" },
9028 /* 20 */{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
9029 8, STATS_FLAGS_PORT, "rx_jabbers" },
9030 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
9031 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
9032 { STATS_OFFSET32(jabber_packets_received),
9033 4, STATS_FLAGS_FUNC, "rx_oversize_packets" },
9034 { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
9035 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
9036 { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
9037 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
9038 { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
9039 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
9040 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
9041 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
9042 { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
9043 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
9044 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
9045 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
9046 { STATS_OFFSET32(etherstatspktsover1522octets_hi),
9047 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
9048 /* 30 */{ STATS_OFFSET32(rx_stat_xonpauseframesreceived_hi),
9049 8, STATS_FLAGS_PORT, "rx_xon_frames" },
9050 { STATS_OFFSET32(rx_stat_xoffpauseframesreceived_hi),
9051 8, STATS_FLAGS_PORT, "rx_xoff_frames" },
9052 { STATS_OFFSET32(tx_stat_outxonsent_hi),
9053 8, STATS_FLAGS_PORT, "tx_xon_frames" },
9054 { STATS_OFFSET32(tx_stat_outxoffsent_hi),
9055 8, STATS_FLAGS_PORT, "tx_xoff_frames" },
9056 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
9057 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
9058 { STATS_OFFSET32(mac_filter_discard),
9059 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
9060 { STATS_OFFSET32(no_buff_discard),
9061 4, STATS_FLAGS_FUNC, "rx_discards" },
9062 { STATS_OFFSET32(xxoverflow_discard),
9063 4, STATS_FLAGS_PORT, "rx_fw_discards" },
9064 { STATS_OFFSET32(brb_drop_hi),
9065 8, STATS_FLAGS_PORT, "brb_discard" },
9066 { STATS_OFFSET32(brb_truncate_hi),
9067 8, STATS_FLAGS_PORT, "brb_truncate" },
9068 /* 40 */{ STATS_OFFSET32(rx_err_discard_pkt),
9069 4, STATS_FLAGS_FUNC, "rx_phy_ip_err_discards"},
9070 { STATS_OFFSET32(rx_skb_alloc_failed),
9071 4, STATS_FLAGS_FUNC, "rx_skb_alloc_discard" },
9072 /* 42 */{ STATS_OFFSET32(hw_csum_err),
9073 4, STATS_FLAGS_FUNC, "rx_csum_offload_errors" }
9076 #define IS_NOT_E1HMF_STAT(bp, i) \
9077 (IS_E1HMF(bp) && (bnx2x_stats_arr[i].flags & STATS_FLAGS_PORT))
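/* STATS_FLAGS_PORT counters are per-port rather than per-function, so
 * in E1H multi-function mode they are skipped from both the ethtool
 * strings and the stats output below */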
9079 static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
9081 struct bnx2x *bp = netdev_priv(dev);
9084 switch (stringset) {
9086 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9087 if (IS_NOT_E1HMF_STAT(bp, i))
9089 strcpy(buf + j*ETH_GSTRING_LEN,
9090 bnx2x_stats_arr[i].string);
9096 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
9101 static int bnx2x_get_stats_count(struct net_device *dev)
9103 struct bnx2x *bp = netdev_priv(dev);
9104 int i, num_stats = 0;
9106 for (i = 0; i < BNX2X_NUM_STATS; i++) {
9107 if (IS_NOT_E1HMF_STAT(bp, i))
9114 static void bnx2x_get_ethtool_stats(struct net_device *dev,
9115 struct ethtool_stats *stats, u64 *buf)
9117 struct bnx2x *bp = netdev_priv(dev);
9118 u32 *hw_stats = (u32 *)&bp->eth_stats;
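/* eth_stats is addressed as an array of u32 words: 4-byte counters
 * occupy one word, 8-byte counters a hi/lo pair (hi word first, hence
 * offset and offset + 1 below) */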
9121 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9122 if (IS_NOT_E1HMF_STAT(bp, i))
9125 if (bnx2x_stats_arr[i].size == 0) {
9126 /* skip this counter */
9131 if (bnx2x_stats_arr[i].size == 4) {
9132 /* 4-byte counter */
9133 buf[j] = (u64) *(hw_stats + bnx2x_stats_arr[i].offset);
9137 /* 8-byte counter */
9138 buf[j] = HILO_U64(*(hw_stats + bnx2x_stats_arr[i].offset),
9139 *(hw_stats + bnx2x_stats_arr[i].offset + 1));
9144 static int bnx2x_phys_id(struct net_device *dev, u32 data)
9146 struct bnx2x *bp = netdev_priv(dev);
9147 int port = BP_PORT(bp);
9150 if (!netif_running(dev))
9159 for (i = 0; i < (data * 2); i++) {
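/* alternate the LED between forced-on (1G OPER mode) and off every
 * half second, i.e. a ~1Hz blink for roughly 'data' seconds total */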
9161 bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
9162 bp->link_params.hw_led_mode,
9163 bp->link_params.chip_id);
9165 bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
9166 bp->link_params.hw_led_mode,
9167 bp->link_params.chip_id);
9169 msleep_interruptible(500);
9170 if (signal_pending(current))
9174 if (bp->link_vars.link_up)
9175 bnx2x_set_led(bp, port, LED_MODE_OPER,
9176 bp->link_vars.line_speed,
9177 bp->link_params.hw_led_mode,
9178 bp->link_params.chip_id);
9183 static struct ethtool_ops bnx2x_ethtool_ops = {
9184 .get_settings = bnx2x_get_settings,
9185 .set_settings = bnx2x_set_settings,
9186 .get_drvinfo = bnx2x_get_drvinfo,
9187 .get_wol = bnx2x_get_wol,
9188 .set_wol = bnx2x_set_wol,
9189 .get_msglevel = bnx2x_get_msglevel,
9190 .set_msglevel = bnx2x_set_msglevel,
9191 .nway_reset = bnx2x_nway_reset,
9192 .get_link = ethtool_op_get_link,
9193 .get_eeprom_len = bnx2x_get_eeprom_len,
9194 .get_eeprom = bnx2x_get_eeprom,
9195 .set_eeprom = bnx2x_set_eeprom,
9196 .get_coalesce = bnx2x_get_coalesce,
9197 .set_coalesce = bnx2x_set_coalesce,
9198 .get_ringparam = bnx2x_get_ringparam,
9199 .set_ringparam = bnx2x_set_ringparam,
9200 .get_pauseparam = bnx2x_get_pauseparam,
9201 .set_pauseparam = bnx2x_set_pauseparam,
9202 .get_rx_csum = bnx2x_get_rx_csum,
9203 .set_rx_csum = bnx2x_set_rx_csum,
9204 .get_tx_csum = ethtool_op_get_tx_csum,
9205 .set_tx_csum = ethtool_op_set_tx_hw_csum,
9206 .set_flags = bnx2x_set_flags,
9207 .get_flags = ethtool_op_get_flags,
9208 .get_sg = ethtool_op_get_sg,
9209 .set_sg = ethtool_op_set_sg,
9210 .get_tso = ethtool_op_get_tso,
9211 .set_tso = bnx2x_set_tso,
9212 .self_test_count = bnx2x_self_test_count,
9213 .self_test = bnx2x_self_test,
9214 .get_strings = bnx2x_get_strings,
9215 .phys_id = bnx2x_phys_id,
9216 .get_stats_count = bnx2x_get_stats_count,
9217 .get_ethtool_stats = bnx2x_get_ethtool_stats,
9220 /* end of ethtool_ops */
9222 /****************************************************************************
9223 * General service functions
9224 ****************************************************************************/
9226 static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
9230 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
9234 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9235 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
9236 PCI_PM_CTRL_PME_STATUS));
9238 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
9239 /* delay required during transition out of D3hot */
9244 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
9248 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
9250 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9253 /* No more memory access after this point until
9254 * device is brought back to D0.
9265 * net_device service functions
9268 static int bnx2x_poll(struct napi_struct *napi, int budget)
9270 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
9272 struct bnx2x *bp = fp->bp;
9276 #ifdef BNX2X_STOP_ON_ERROR
9277 if (unlikely(bp->panic))
9281 prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
9282 prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
9283 prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
9285 bnx2x_update_fpsb_idx(fp);
9287 if (BNX2X_HAS_TX_WORK(fp))
9288 bnx2x_tx_int(fp, budget);
9290 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
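/* the last entry of every RCQ page is a next-page pointer, not a real
 * completion, so a status-block index landing on it has to be bumped
 * past it before comparing against our consumer */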
9291 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
9293 if (BNX2X_HAS_RX_WORK(fp))
9294 work_done = bnx2x_rx_int(fp, budget);
9296 rmb(); /* BNX2X_HAS_WORK() reads the status block */
9297 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
9298 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
9301 /* must not complete if we consumed full budget */
9302 if ((work_done < budget) && !BNX2X_HAS_WORK(fp)) {
9304 #ifdef BNX2X_STOP_ON_ERROR
9307 netif_rx_complete(napi);
9309 bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID,
9310 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
9311 bnx2x_ack_sb(bp, FP_SB_ID(fp), CSTORM_ID,
9312 le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
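/* ack both storm indices, but only the second ack uses IGU_INT_ENABLE;
 * the interrupt stays masked until both status-block indices have been
 * consumed */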
9318 /* we split the first BD into headers and data BDs
9319 * to ease the pain of our fellow microcode engineers
9320 * we use one mapping for both BDs
9321 * So far this has only been observed to happen
9322 * in Other Operating Systems(TM)
9324 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
9325 struct bnx2x_fastpath *fp,
9326 struct eth_tx_bd **tx_bd, u16 hlen,
9327 u16 bd_prod, int nbd)
9329 struct eth_tx_bd *h_tx_bd = *tx_bd;
9330 struct eth_tx_bd *d_tx_bd;
9332 int old_len = le16_to_cpu(h_tx_bd->nbytes);
9334 /* first fix first BD */
9335 h_tx_bd->nbd = cpu_to_le16(nbd);
9336 h_tx_bd->nbytes = cpu_to_le16(hlen);
9338 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
9339 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
9340 h_tx_bd->addr_lo, h_tx_bd->nbd);
9342 /* now get a new data BD
9343 * (after the pbd) and fill it */
9344 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9345 d_tx_bd = &fp->tx_desc_ring[bd_prod];
9347 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
9348 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
9350 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9351 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9352 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
9354 /* this marks the BD as one that has no individual mapping
9355 * the FW ignores this flag in a BD not marked start
9357 d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
9358 DP(NETIF_MSG_TX_QUEUED,
9359 "TSO split data size is %d (%x:%x)\n",
9360 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
9362 /* update tx_bd for marking the last BD flag */
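/* Adjust a partial checksum whose coverage starts 'fix' bytes before
 * (fix > 0) or after (fix < 0) the transport header: fold the
 * difference out or back in, then byte-swap for the parsing BD.
 */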
9368 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
9371 csum = (u16) ~csum_fold(csum_sub(csum,
9372 csum_partial(t_header - fix, fix, 0)));
9375 csum = (u16) ~csum_fold(csum_add(csum,
9376 csum_partial(t_header, -fix, 0)));
9378 return swab16(csum);
9381 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
9385 if (skb->ip_summed != CHECKSUM_PARTIAL)
9389 if (skb->protocol == ntohs(ETH_P_IPV6)) {
9391 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
9392 rc |= XMIT_CSUM_TCP;
9396 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
9397 rc |= XMIT_CSUM_TCP;
9401 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
9404 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
9410 /* check if packet requires linearization (packet is too fragmented) */
9411 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
9416 int first_bd_sz = 0;
9418 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
9419 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
9421 if (xmit_type & XMIT_GSO) {
9422 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
9423 /* Check if LSO packet needs to be copied:
9424 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
9425 int wnd_size = MAX_FETCH_BD - 3;
9426 /* Number of windows to check */
9427 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
9432 /* Headers length */
9433 hlen = (int)(skb_transport_header(skb) - skb->data) +
9436 /* Amount of data (w/o headers) on linear part of SKB*/
9437 first_bd_sz = skb_headlen(skb) - hlen;
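/* sliding-window check: every run of wnd_size consecutive BDs must
 * carry at least one full MSS of payload, otherwise a single segment
 * could need more BDs than the FW can fetch for it */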
9439 wnd_sum = first_bd_sz;
9441 /* Calculate the first sum - it's special */
9442 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
9444 skb_shinfo(skb)->frags[frag_idx].size;
9446 /* If there was data on linear skb data - check it */
9447 if (first_bd_sz > 0) {
9448 if (unlikely(wnd_sum < lso_mss)) {
9453 wnd_sum -= first_bd_sz;
9456 /* Others are easier: run through the frag list and
9457 check all windows */
9458 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
9460 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
9462 if (unlikely(wnd_sum < lso_mss)) {
9467 skb_shinfo(skb)->frags[wnd_idx].size;
9471 /* in the non-LSO case, a too-fragmented packet should always
9478 if (unlikely(to_copy))
9479 DP(NETIF_MSG_TX_QUEUED,
9480 "Linearization IS REQUIRED for %s packet. "
9481 "num_frags %d hlen %d first_bd_sz %d\n",
9482 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
9483 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
9488 /* called with netif_tx_lock
9489 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
9490 * netif_wake_queue()
9492 static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
9494 struct bnx2x *bp = netdev_priv(dev);
9495 struct bnx2x_fastpath *fp;
9496 struct sw_tx_bd *tx_buf;
9497 struct eth_tx_bd *tx_bd;
9498 struct eth_tx_parse_bd *pbd = NULL;
9499 u16 pkt_prod, bd_prod;
9502 u32 xmit_type = bnx2x_xmit_type(bp, skb);
9503 int vlan_off = (bp->e1hov ? 4 : 0);
9507 #ifdef BNX2X_STOP_ON_ERROR
9508 if (unlikely(bp->panic))
9509 return NETDEV_TX_BUSY;
9512 fp_index = (smp_processor_id() % bp->num_queues);
9513 fp = &bp->fp[fp_index];
9515 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
9516 bp->eth_stats.driver_xoff++;
9517 netif_stop_queue(dev);
9518 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
9519 return NETDEV_TX_BUSY;
9522 DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
9523 " gso type %x xmit_type %x\n",
9524 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
9525 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
9527 /* First, check if we need to linearize the skb
9528 (due to FW restrictions) */
9529 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
9530 /* Statistics of linearization */
9532 if (skb_linearize(skb) != 0) {
9533 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
9534 "silently dropping this SKB\n");
9535 dev_kfree_skb_any(skb);
9536 return NETDEV_TX_OK;
9541 Please read carefully. First we use one BD which we mark as start,
9542 then for TSO or xsum we have a parsing info BD,
9543 and only then we have the rest of the TSO BDs.
9544 (don't forget to mark the last one as last,
9545 and to unmap only AFTER you write to the BD ...)
9546 And above all, all pbd sizes are in words - NOT DWORDS!
9549 pkt_prod = fp->tx_pkt_prod++;
9550 bd_prod = TX_BD(fp->tx_bd_prod);
9552 /* get a tx_buf and first BD */
9553 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
9554 tx_bd = &fp->tx_desc_ring[bd_prod];
9556 tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
9557 tx_bd->general_data = (UNICAST_ADDRESS <<
9558 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
9560 tx_bd->general_data |= (1 << ETH_TX_BD_HDR_NBDS_SHIFT);
9562 /* remember the first BD of the packet */
9563 tx_buf->first_bd = fp->tx_bd_prod;
9566 DP(NETIF_MSG_TX_QUEUED,
9567 "sending pkt %u @%p next_idx %u bd %u @%p\n",
9568 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);
9570 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb)) {
9571 tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
9572 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
9575 tx_bd->vlan = cpu_to_le16(pkt_prod);
9578 /* turn on parsing and get a BD */
9579 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9580 pbd = (void *)&fp->tx_desc_ring[bd_prod];
9582 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
9585 if (xmit_type & XMIT_CSUM) {
9586 hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;
9588 /* for now NS flag is not used in Linux */
9589 pbd->global_data = (hlen |
9590 ((skb->protocol == ntohs(ETH_P_8021Q)) <<
9591 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
9593 pbd->ip_hlen = (skb_transport_header(skb) -
9594 skb_network_header(skb)) / 2;
9596 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
9598 pbd->total_hlen = cpu_to_le16(hlen);
9599 hlen = hlen*2 - vlan_off;
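/* pbd header lengths are kept in 16-bit words (hence the /2 above);
 * hlen is converted back to bytes here for the TSO split below */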
9601 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;
9603 if (xmit_type & XMIT_CSUM_V4)
9604 tx_bd->bd_flags.as_bitfield |=
9605 ETH_TX_BD_FLAGS_IP_CSUM;
9607 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
9609 if (xmit_type & XMIT_CSUM_TCP) {
9610 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
9613 s8 fix = SKB_CS_OFF(skb); /* signed! */
9615 pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
9616 pbd->cs_offset = fix / 2;
9618 DP(NETIF_MSG_TX_QUEUED,
9619 "hlen %d offset %d fix %d csum before fix %x\n",
9620 le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
9623 /* HW bug: fixup the CSUM */
9624 pbd->tcp_pseudo_csum =
9625 bnx2x_csum_fix(skb_transport_header(skb),
9628 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
9629 pbd->tcp_pseudo_csum);
9633 mapping = pci_map_single(bp->pdev, skb->data,
9634 skb_headlen(skb), PCI_DMA_TODEVICE);
9636 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9637 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
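/* nbd = one BD for the linear data plus one per page fragment, plus
 * the parsing BD when one was attached */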
9638 nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
9639 tx_bd->nbd = cpu_to_le16(nbd);
9640 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
9642 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
9643 " nbytes %d flags %x vlan %x\n",
9644 tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
9645 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
9646 le16_to_cpu(tx_bd->vlan));
9648 if (xmit_type & XMIT_GSO) {
9650 DP(NETIF_MSG_TX_QUEUED,
9651 "TSO packet len %d hlen %d total len %d tso size %d\n",
9652 skb->len, hlen, skb_headlen(skb),
9653 skb_shinfo(skb)->gso_size);
9655 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
9657 if (unlikely(skb_headlen(skb) > hlen))
9658 bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
9661 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
9662 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
9663 pbd->tcp_flags = pbd_tcp_flags(skb);
9665 if (xmit_type & XMIT_GSO_V4) {
9666 pbd->ip_id = swab16(ip_hdr(skb)->id);
9667 pbd->tcp_pseudo_csum =
9668 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
9670 0, IPPROTO_TCP, 0));
9673 pbd->tcp_pseudo_csum =
9674 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
9675 &ipv6_hdr(skb)->daddr,
9676 0, IPPROTO_TCP, 0));
9678 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
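/* the pseudo-header sum is seeded with length 0 above;
 * PSEUDO_CS_WITHOUT_LEN tells the FW to add each segment's own length
 * when it rebuilds the checksum per generated segment */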
9681 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
9682 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
9684 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9685 tx_bd = &fp->tx_desc_ring[bd_prod];
9687 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
9688 frag->size, PCI_DMA_TODEVICE);
9690 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9691 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9692 tx_bd->nbytes = cpu_to_le16(frag->size);
9693 tx_bd->vlan = cpu_to_le16(pkt_prod);
9694 tx_bd->bd_flags.as_bitfield = 0;
9696 DP(NETIF_MSG_TX_QUEUED,
9697 "frag %d bd @%p addr (%x:%x) nbytes %d flags %x\n",
9698 i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
9699 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
9702 /* now at last mark the BD as the last BD */
9703 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;
9705 DP(NETIF_MSG_TX_QUEUED, "last bd @%p flags %x\n",
9706 tx_bd, tx_bd->bd_flags.as_bitfield);
9708 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9710 /* now send a tx doorbell, counting the next BD
9711 * if the packet contains or ends with it
9713 if (TX_BD_POFF(bd_prod) < nbd)
9717 DP(NETIF_MSG_TX_QUEUED,
9718 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
9719 " tcp_flags %x xsum %x seq %u hlen %u\n",
9720 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
9721 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
9722 pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
9724 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
9727 * Make sure that the BD data is updated before updating the producer
9728 * since FW might read the BD right after the producer is updated.
9729 * This is only applicable for weak-ordered memory model archs such
9730 * as IA-64. The following barrier is also mandatory since the FW
9731 * assumes packets must have BDs.
9735 fp->hw_tx_prods->bds_prod =
9736 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + nbd);
9737 mb(); /* FW restriction: must not reorder writing nbd and packets */
9738 fp->hw_tx_prods->packets_prod =
9739 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
9740 DOORBELL(bp, FP_IDX(fp), 0);
9744 fp->tx_bd_prod += nbd;
9745 dev->trans_start = jiffies;
9747 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
9748 /* We want bnx2x_tx_int to "see" the updated tx_bd_prod
9749 if we put Tx into XOFF state. */
9751 netif_stop_queue(dev);
9752 bp->eth_stats.driver_xoff++;
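/* re-check after stopping the queue: bnx2x_tx_int() may have freed
 * descriptors in the meantime and it only wakes queues it sees
 * stopped, so this closes the lost-wakeup window */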
9753 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
9754 netif_wake_queue(dev);
9758 return NETDEV_TX_OK;
9761 /* called with rtnl_lock */
9762 static int bnx2x_open(struct net_device *dev)
9764 struct bnx2x *bp = netdev_priv(dev);
9766 bnx2x_set_power_state(bp, PCI_D0);
9768 return bnx2x_nic_load(bp, LOAD_OPEN);
9771 /* called with rtnl_lock */
9772 static int bnx2x_close(struct net_device *dev)
9774 struct bnx2x *bp = netdev_priv(dev);
9776 /* Unload the driver, release IRQs */
9777 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
9778 if (atomic_read(&bp->pdev->enable_cnt) == 1)
9779 if (!CHIP_REV_IS_SLOW(bp))
9780 bnx2x_set_power_state(bp, PCI_D3hot);
9785 /* called with netif_tx_lock from set_multicast */
9786 static void bnx2x_set_rx_mode(struct net_device *dev)
9788 struct bnx2x *bp = netdev_priv(dev);
9789 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
9790 int port = BP_PORT(bp);
9792 if (bp->state != BNX2X_STATE_OPEN) {
9793 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
9797 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
9799 if (dev->flags & IFF_PROMISC)
9800 rx_mode = BNX2X_RX_MODE_PROMISC;
9802 else if ((dev->flags & IFF_ALLMULTI) ||
9803 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
9804 rx_mode = BNX2X_RX_MODE_ALLMULTI;
9806 else { /* some multicasts */
9807 if (CHIP_IS_E1(bp)) {
9809 struct dev_mc_list *mclist;
9810 struct mac_configuration_cmd *config =
9811 bnx2x_sp(bp, mcast_config);
9813 for (i = 0, mclist = dev->mc_list;
9814 mclist && (i < dev->mc_count);
9815 i++, mclist = mclist->next) {
9817 config->config_table[i].
9818 cam_entry.msb_mac_addr =
9819 swab16(*(u16 *)&mclist->dmi_addr[0]);
9820 config->config_table[i].
9821 cam_entry.middle_mac_addr =
9822 swab16(*(u16 *)&mclist->dmi_addr[2]);
9823 config->config_table[i].
9824 cam_entry.lsb_mac_addr =
9825 swab16(*(u16 *)&mclist->dmi_addr[4]);
9826 config->config_table[i].cam_entry.flags =
9828 config->config_table[i].
9829 target_table_entry.flags = 0;
9830 config->config_table[i].
9831 target_table_entry.client_id = 0;
9832 config->config_table[i].
9833 target_table_entry.vlan_id = 0;
9836 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
9837 config->config_table[i].
9838 cam_entry.msb_mac_addr,
9839 config->config_table[i].
9840 cam_entry.middle_mac_addr,
9841 config->config_table[i].
9842 cam_entry.lsb_mac_addr);
9844 old = config->hdr.length_6b;
9846 for (; i < old; i++) {
9847 if (CAM_IS_INVALID(config->
9849 i--; /* already invalidated */
9853 CAM_INVALIDATE(config->
9858 if (CHIP_REV_IS_SLOW(bp))
9859 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
9861 offset = BNX2X_MAX_MULTICAST*(1 + port);
9863 config->hdr.length_6b = i;
9864 config->hdr.offset = offset;
9865 config->hdr.client_id = BP_CL_ID(bp);
9866 config->hdr.reserved1 = 0;
9868 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
9869 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
9870 U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
9873 /* Accept one or more multicasts */
9874 struct dev_mc_list *mclist;
9875 u32 mc_filter[MC_HASH_SIZE];
9876 u32 crc, bit, regidx;
9879 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
9881 for (i = 0, mclist = dev->mc_list;
9882 mclist && (i < dev->mc_count);
9883 i++, mclist = mclist->next) {
9885 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
9888 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
9889 bit = (crc >> 24) & 0xff;
9892 mc_filter[regidx] |= (1 << bit);
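/* the top byte of the crc32c hash selects one of 256 filter bits
 * spread across the MC_HASH_SIZE configuration registers that are
 * written back below */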
9895 for (i = 0; i < MC_HASH_SIZE; i++)
9896 REG_WR(bp, MC_HASH_OFFSET(bp, i),
9901 bp->rx_mode = rx_mode;
9902 bnx2x_set_storm_rx_mode(bp);
9905 /* called with rtnl_lock */
9906 static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
9908 struct sockaddr *addr = p;
9909 struct bnx2x *bp = netdev_priv(dev);
9911 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
9914 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9915 if (netif_running(dev)) {
9917 bnx2x_set_mac_addr_e1(bp, 1);
9919 bnx2x_set_mac_addr_e1h(bp, 1);
9925 /* called with rtnl_lock */
9926 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
9928 struct mii_ioctl_data *data = if_mii(ifr);
9929 struct bnx2x *bp = netdev_priv(dev);
9930 int port = BP_PORT(bp);
9935 data->phy_id = bp->port.phy_addr;
9942 if (!netif_running(dev))
9945 mutex_lock(&bp->port.phy_mutex);
9946 err = bnx2x_cl45_read(bp, port, 0, bp->port.phy_addr,
9947 DEFAULT_PHY_DEV_ADDR,
9948 (data->reg_num & 0x1f), &mii_regval);
9949 data->val_out = mii_regval;
9950 mutex_unlock(&bp->port.phy_mutex);
9955 if (!capable(CAP_NET_ADMIN))
9958 if (!netif_running(dev))
9961 mutex_lock(&bp->port.phy_mutex);
9962 err = bnx2x_cl45_write(bp, port, 0, bp->port.phy_addr,
9963 DEFAULT_PHY_DEV_ADDR,
9964 (data->reg_num & 0x1f), data->val_in);
9965 mutex_unlock(&bp->port.phy_mutex);
9976 /* called with rtnl_lock */
9977 static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
9979 struct bnx2x *bp = netdev_priv(dev);
9982 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
9983 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
9986 /* This does not race with packet allocation
9987 * because the actual alloc size is
9988 * only updated as part of load
9992 if (netif_running(dev)) {
9993 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9994 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
10000 static void bnx2x_tx_timeout(struct net_device *dev)
10002 struct bnx2x *bp = netdev_priv(dev);
10004 #ifdef BNX2X_STOP_ON_ERROR
10008 /* This allows the netif to be shutdown gracefully before resetting */
10009 schedule_work(&bp->reset_task);
10013 /* called with rtnl_lock */
10014 static void bnx2x_vlan_rx_register(struct net_device *dev,
10015 struct vlan_group *vlgrp)
10017 struct bnx2x *bp = netdev_priv(dev);
10020 if (netif_running(dev))
10021 bnx2x_set_client_config(bp);
10026 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10027 static void poll_bnx2x(struct net_device *dev)
10029 struct bnx2x *bp = netdev_priv(dev);
10031 disable_irq(bp->pdev->irq);
10032 bnx2x_interrupt(bp->pdev->irq, dev);
10033 enable_irq(bp->pdev->irq);
10037 static const struct net_device_ops bnx2x_netdev_ops = {
10038 .ndo_open = bnx2x_open,
10039 .ndo_stop = bnx2x_close,
10040 .ndo_start_xmit = bnx2x_start_xmit,
10041 .ndo_set_multicast_list = bnx2x_set_rx_mode,
10042 .ndo_set_mac_address = bnx2x_change_mac_addr,
10043 .ndo_validate_addr = eth_validate_addr,
10044 .ndo_do_ioctl = bnx2x_ioctl,
10045 .ndo_change_mtu = bnx2x_change_mtu,
10046 .ndo_tx_timeout = bnx2x_tx_timeout,
10048 .ndo_vlan_rx_register = bnx2x_vlan_rx_register,
10050 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10051 .ndo_poll_controller = poll_bnx2x,
10056 static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
10057 struct net_device *dev)
10062 SET_NETDEV_DEV(dev, &pdev->dev);
10063 bp = netdev_priv(dev);
10068 bp->func = PCI_FUNC(pdev->devfn);
10070 rc = pci_enable_device(pdev);
10072 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
10076 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
10077 printk(KERN_ERR PFX "Cannot find PCI device base address,"
10080 goto err_out_disable;
10083 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
10084 printk(KERN_ERR PFX "Cannot find second PCI device"
10085 " base address, aborting\n");
10087 goto err_out_disable;
10090 if (atomic_read(&pdev->enable_cnt) == 1) {
10091 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
10093 printk(KERN_ERR PFX "Cannot obtain PCI resources,"
10095 goto err_out_disable;
10098 pci_set_master(pdev);
10099 pci_save_state(pdev);
10102 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
10103 if (bp->pm_cap == 0) {
10104 printk(KERN_ERR PFX "Cannot find power management"
10105 " capability, aborting\n");
10107 goto err_out_release;
10110 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
10111 if (bp->pcie_cap == 0) {
10112 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
10115 goto err_out_release;
10118 if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
10119 bp->flags |= USING_DAC_FLAG;
10120 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
10121 printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
10122 " failed, aborting\n");
10124 goto err_out_release;
10127 } else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
10128 printk(KERN_ERR PFX "System does not support DMA,"
10131 goto err_out_release;
10134 dev->mem_start = pci_resource_start(pdev, 0);
10135 dev->base_addr = dev->mem_start;
10136 dev->mem_end = pci_resource_end(pdev, 0);
10138 dev->irq = pdev->irq;
10140 bp->regview = pci_ioremap_bar(pdev, 0);
10141 if (!bp->regview) {
10142 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
10144 goto err_out_release;
10147 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
10148 min_t(u64, BNX2X_DB_SIZE,
10149 pci_resource_len(pdev, 2)));
10150 if (!bp->doorbells) {
10151 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
10153 goto err_out_unmap;
10156 bnx2x_set_power_state(bp, PCI_D0);
10158 /* clean indirect addresses */
10159 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
10160 PCICFG_VENDOR_ID_OFFSET);
10161 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
10162 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
10163 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
10164 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
10166 dev->watchdog_timeo = TX_TIMEOUT;
10168 dev->netdev_ops = &bnx2x_netdev_ops;
10169 dev->ethtool_ops = &bnx2x_ethtool_ops;
10170 dev->features |= NETIF_F_SG;
10171 dev->features |= NETIF_F_HW_CSUM;
10172 if (bp->flags & USING_DAC_FLAG)
10173 dev->features |= NETIF_F_HIGHDMA;
10175 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
10177 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
10178 dev->features |= NETIF_F_TSO6;
10184 iounmap(bp->regview);
10185 bp->regview = NULL;
10187 if (bp->doorbells) {
10188 iounmap(bp->doorbells);
10189 bp->doorbells = NULL;
10193 if (atomic_read(&pdev->enable_cnt) == 1)
10194 pci_release_regions(pdev);
10197 pci_disable_device(pdev);
10198 pci_set_drvdata(pdev, NULL);
10204 static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
10206 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10208 val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
10212 /* return value of 1=2.5GHz 2=5GHz */
10213 static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
10215 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10217 val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
10221 static int __devinit bnx2x_init_one(struct pci_dev *pdev,
10222 const struct pci_device_id *ent)
10224 static int version_printed;
10225 struct net_device *dev = NULL;
10229 if (version_printed++ == 0)
10230 printk(KERN_INFO "%s", version);
10232 /* dev zeroed in alloc_etherdev */
10233 dev = alloc_etherdev(sizeof(*bp));
10235 printk(KERN_ERR PFX "Cannot allocate net device\n");
10239 bp = netdev_priv(dev);
10240 bp->msglevel = debug;
10242 rc = bnx2x_init_dev(pdev, dev);
10248 rc = register_netdev(dev);
10250 dev_err(&pdev->dev, "Cannot register net device\n");
10251 goto init_one_exit;
10254 pci_set_drvdata(pdev, dev);
10256 rc = bnx2x_init_bp(bp);
10258 unregister_netdev(dev);
10259 goto init_one_exit;
10262 netif_carrier_off(dev);
10264 bp->common.name = board_info[ent->driver_data].name;
10265 printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
10266 " IRQ %d, ", dev->name, bp->common.name,
10267 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
10268 bnx2x_get_pcie_width(bp),
10269 (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
10270 dev->base_addr, bp->pdev->irq);
10271 printk(KERN_CONT "node addr %pM\n", dev->dev_addr);
10276 iounmap(bp->regview);
10279 iounmap(bp->doorbells);
10283 if (atomic_read(&pdev->enable_cnt) == 1)
10284 pci_release_regions(pdev);
10286 pci_disable_device(pdev);
10287 pci_set_drvdata(pdev, NULL);
10292 static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
10294 struct net_device *dev = pci_get_drvdata(pdev);
10298 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10301 bp = netdev_priv(dev);
10303 unregister_netdev(dev);
10306 iounmap(bp->regview);
10309 iounmap(bp->doorbells);
10313 if (atomic_read(&pdev->enable_cnt) == 1)
10314 pci_release_regions(pdev);
10316 pci_disable_device(pdev);
10317 pci_set_drvdata(pdev, NULL);
10320 static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
10322 struct net_device *dev = pci_get_drvdata(pdev);
10326 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10329 bp = netdev_priv(dev);
10333 pci_save_state(pdev);
10335 if (!netif_running(dev)) {
10340 netif_device_detach(dev);
10342 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
10344 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
10351 static int bnx2x_resume(struct pci_dev *pdev)
10353 struct net_device *dev = pci_get_drvdata(pdev);
10358 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10361 bp = netdev_priv(dev);
10365 pci_restore_state(pdev);
10367 if (!netif_running(dev)) {
10372 bnx2x_set_power_state(bp, PCI_D0);
10373 netif_device_attach(dev);
10375 rc = bnx2x_nic_load(bp, LOAD_OPEN);
10382 static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
10386 bp->state = BNX2X_STATE_ERROR;
10388 bp->rx_mode = BNX2X_RX_MODE_NONE;
10390 bnx2x_netif_stop(bp, 0);
10392 del_timer_sync(&bp->timer);
10393 bp->stats_state = STATS_STATE_DISABLED;
10394 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
10397 bnx2x_free_irq(bp);
10399 if (CHIP_IS_E1(bp)) {
10400 struct mac_configuration_cmd *config =
10401 bnx2x_sp(bp, mcast_config);
10403 for (i = 0; i < config->hdr.length_6b; i++)
10404 CAM_INVALIDATE(config->config_table[i]);
10407 /* Free SKBs, SGEs, TPA pool and driver internals */
10408 bnx2x_free_skbs(bp);
10409 for_each_queue(bp, i)
10410 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
10411 bnx2x_free_mem(bp);
10413 bp->state = BNX2X_STATE_CLOSED;
10415 netif_carrier_off(bp->dev);
10420 static void bnx2x_eeh_recover(struct bnx2x *bp)
10424 mutex_init(&bp->port.phy_mutex);
10426 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
10427 bp->link_params.shmem_base = bp->common.shmem_base;
10428 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
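/* a functional MCP always places shmem in the 0xA0000-0xBFFFF window;
 * anything else means the management firmware is not running, so fall
 * back to NO_MCP_FLAG operation */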
10430 if (!bp->common.shmem_base ||
10431 (bp->common.shmem_base < 0xA0000) ||
10432 (bp->common.shmem_base >= 0xC0000)) {
10433 BNX2X_DEV_INFO("MCP not active\n");
10434 bp->flags |= NO_MCP_FLAG;
10438 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
10439 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
10440 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
10441 BNX2X_ERR("BAD MCP validity signature\n");
10443 if (!BP_NOMCP(bp)) {
10444 bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
10445 & DRV_MSG_SEQ_NUMBER_MASK);
10446 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
10451 * bnx2x_io_error_detected - called when PCI error is detected
10452 * @pdev: Pointer to PCI device
10453 * @state: The current pci connection state
10455 * This function is called after a PCI bus error affecting
10456 * this device has been detected.
10458 static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
10459 pci_channel_state_t state)
10461 struct net_device *dev = pci_get_drvdata(pdev);
10462 struct bnx2x *bp = netdev_priv(dev);
10466 netif_device_detach(dev);
10468 if (netif_running(dev))
10469 bnx2x_eeh_nic_unload(bp);
10471 pci_disable_device(pdev);
10475 /* Request a slot reset */
10476 return PCI_ERS_RESULT_NEED_RESET;
10480 * bnx2x_io_slot_reset - called after the PCI bus has been reset
10481 * @pdev: Pointer to PCI device
10483 * Restart the card from scratch, as if from a cold-boot.
10485 static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
10487 struct net_device *dev = pci_get_drvdata(pdev);
10488 struct bnx2x *bp = netdev_priv(dev);
10492 if (pci_enable_device(pdev)) {
10493 dev_err(&pdev->dev,
10494 "Cannot re-enable PCI device after reset\n");
10496 return PCI_ERS_RESULT_DISCONNECT;
10499 pci_set_master(pdev);
10500 pci_restore_state(pdev);
10502 if (netif_running(dev))
10503 bnx2x_set_power_state(bp, PCI_D0);
10507 return PCI_ERS_RESULT_RECOVERED;
10511 * bnx2x_io_resume - called when traffic can start flowing again
10512 * @pdev: Pointer to PCI device
10514 * This callback is called when the error recovery driver tells us that
10515 * it's OK to resume normal operation.
10517 static void bnx2x_io_resume(struct pci_dev *pdev)
10519 struct net_device *dev = pci_get_drvdata(pdev);
10520 struct bnx2x *bp = netdev_priv(dev);
10524 bnx2x_eeh_recover(bp);
10526 if (netif_running(dev))
10527 bnx2x_nic_load(bp, LOAD_NORMAL);
10529 netif_device_attach(dev);
10534 static struct pci_error_handlers bnx2x_err_handler = {
10535 .error_detected = bnx2x_io_error_detected,
10536 .slot_reset = bnx2x_io_slot_reset,
10537 .resume = bnx2x_io_resume,
10540 static struct pci_driver bnx2x_pci_driver = {
10541 .name = DRV_MODULE_NAME,
10542 .id_table = bnx2x_pci_tbl,
10543 .probe = bnx2x_init_one,
10544 .remove = __devexit_p(bnx2x_remove_one),
10545 .suspend = bnx2x_suspend,
10546 .resume = bnx2x_resume,
10547 .err_handler = &bnx2x_err_handler,
10550 static int __init bnx2x_init(void)
10552 bnx2x_wq = create_singlethread_workqueue("bnx2x");
10553 if (bnx2x_wq == NULL) {
10554 printk(KERN_ERR PFX "Cannot create workqueue\n");
10558 return pci_register_driver(&bnx2x_pci_driver);
10561 static void __exit bnx2x_cleanup(void)
10563 pci_unregister_driver(&bnx2x_pci_driver);
10565 destroy_workqueue(bnx2x_wq);
10568 module_init(bnx2x_init);
10569 module_exit(bnx2x_cleanup);