1 /* bnx2x_main.c: Broadcom Everest network driver.
3 * Copyright (c) 2007-2010 Broadcom Corporation
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath and fastpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
18 #include <linux/module.h>
19 #include <linux/moduleparam.h>
20 #include <linux/kernel.h>
21 #include <linux/device.h> /* for dev_info() */
22 #include <linux/timer.h>
23 #include <linux/errno.h>
24 #include <linux/ioport.h>
25 #include <linux/slab.h>
26 #include <linux/interrupt.h>
27 #include <linux/pci.h>
28 #include <linux/init.h>
29 #include <linux/netdevice.h>
30 #include <linux/etherdevice.h>
31 #include <linux/skbuff.h>
32 #include <linux/dma-mapping.h>
33 #include <linux/bitops.h>
34 #include <linux/irq.h>
35 #include <linux/delay.h>
36 #include <asm/byteorder.h>
37 #include <linux/time.h>
38 #include <linux/ethtool.h>
39 #include <linux/mii.h>
40 #include <linux/if_vlan.h>
43 #include <net/checksum.h>
44 #include <net/ip6_checksum.h>
45 #include <linux/workqueue.h>
46 #include <linux/crc32.h>
47 #include <linux/crc32c.h>
48 #include <linux/prefetch.h>
49 #include <linux/zlib.h>
51 #include <linux/stringify.h>
55 #include "bnx2x_init.h"
56 #include "bnx2x_init_ops.h"
57 #include "bnx2x_cmn.h"
59 #include <linux/firmware.h>
60 #include "bnx2x_fw_file_hdr.h"
62 #define FW_FILE_VERSION \
63 __stringify(BCM_5710_FW_MAJOR_VERSION) "." \
64 __stringify(BCM_5710_FW_MINOR_VERSION) "." \
65 __stringify(BCM_5710_FW_REVISION_VERSION) "." \
66 __stringify(BCM_5710_FW_ENGINEERING_VERSION)
67 #define FW_FILE_NAME_E1 "bnx2x/bnx2x-e1-" FW_FILE_VERSION ".fw"
68 #define FW_FILE_NAME_E1H "bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw"
69 #define FW_FILE_NAME_E2 "bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw"
71 /* Time in jiffies before concluding the transmitter is hung */
72 #define TX_TIMEOUT (5*HZ)
74 static char version[] __devinitdata =
75 "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
76 DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
78 MODULE_AUTHOR("Eliezer Tamir");
79 MODULE_DESCRIPTION("Broadcom NetXtreme II "
80 "BCM57710/57711/57711E/57712/57712E Driver");
81 MODULE_LICENSE("GPL");
82 MODULE_VERSION(DRV_MODULE_VERSION);
83 MODULE_FIRMWARE(FW_FILE_NAME_E1);
84 MODULE_FIRMWARE(FW_FILE_NAME_E1H);
85 MODULE_FIRMWARE(FW_FILE_NAME_E2);
87 static int multi_mode = 1;
88 module_param(multi_mode, int, 0);
89 MODULE_PARM_DESC(multi_mode, " Multi queue mode "
90 "(0 Disable; 1 Enable (default))");
93 module_param(num_queues, int, 0);
94 MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
95 " (default is as a number of CPUs)");
97 static int disable_tpa;
98 module_param(disable_tpa, int, 0);
99 MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
102 module_param(int_mode, int, 0);
103 MODULE_PARM_DESC(int_mode, " Force interrupt mode other then MSI-X "
106 static int dropless_fc;
107 module_param(dropless_fc, int, 0);
108 MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");
111 module_param(poll, int, 0);
112 MODULE_PARM_DESC(poll, " Use polling (for debug)");
114 static int mrrs = -1;
115 module_param(mrrs, int, 0);
116 MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");
119 module_param(debug, int, 0);
120 MODULE_PARM_DESC(debug, " Default debug msglevel");
122 static struct workqueue_struct *bnx2x_wq;
124 enum bnx2x_board_type {
132 /* indexed by board_type, above */
135 } board_info[] __devinitdata = {
136 { "Broadcom NetXtreme II BCM57710 XGb" },
137 { "Broadcom NetXtreme II BCM57711 XGb" },
138 { "Broadcom NetXtreme II BCM57711E XGb" },
139 { "Broadcom NetXtreme II BCM57712 XGb" },
140 { "Broadcom NetXtreme II BCM57712E XGb" }
143 #ifndef PCI_DEVICE_ID_NX2_57712
144 #define PCI_DEVICE_ID_NX2_57712 0x1662
146 #ifndef PCI_DEVICE_ID_NX2_57712E
147 #define PCI_DEVICE_ID_NX2_57712E 0x1663
150 static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
151 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
152 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
153 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
154 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712), BCM57712 },
155 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712E), BCM57712E },
159 MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
161 /****************************************************************************
162 * General service functions
163 ****************************************************************************/
165 static inline void __storm_memset_dma_mapping(struct bnx2x *bp,
166 u32 addr, dma_addr_t mapping)
168 REG_WR(bp, addr, U64_LO(mapping));
169 REG_WR(bp, addr + 4, U64_HI(mapping));
172 static inline void __storm_memset_fill(struct bnx2x *bp,
173 u32 addr, size_t size, u32 val)
176 for (i = 0; i < size/4; i++)
177 REG_WR(bp, addr + (i * 4), val);
180 static inline void storm_memset_ustats_zero(struct bnx2x *bp,
181 u8 port, u16 stat_id)
183 size_t size = sizeof(struct ustorm_per_client_stats);
185 u32 addr = BAR_USTRORM_INTMEM +
186 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);
188 __storm_memset_fill(bp, addr, size, 0);
191 static inline void storm_memset_tstats_zero(struct bnx2x *bp,
192 u8 port, u16 stat_id)
194 size_t size = sizeof(struct tstorm_per_client_stats);
196 u32 addr = BAR_TSTRORM_INTMEM +
197 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);
199 __storm_memset_fill(bp, addr, size, 0);
202 static inline void storm_memset_xstats_zero(struct bnx2x *bp,
203 u8 port, u16 stat_id)
205 size_t size = sizeof(struct xstorm_per_client_stats);
207 u32 addr = BAR_XSTRORM_INTMEM +
208 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);
210 __storm_memset_fill(bp, addr, size, 0);
214 static inline void storm_memset_spq_addr(struct bnx2x *bp,
215 dma_addr_t mapping, u16 abs_fid)
217 u32 addr = XSEM_REG_FAST_MEMORY +
218 XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid);
220 __storm_memset_dma_mapping(bp, addr, mapping);
223 static inline void storm_memset_ov(struct bnx2x *bp, u16 ov, u16 abs_fid)
225 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(abs_fid), ov);
228 static inline void storm_memset_func_cfg(struct bnx2x *bp,
229 struct tstorm_eth_function_common_config *tcfg,
232 size_t size = sizeof(struct tstorm_eth_function_common_config);
234 u32 addr = BAR_TSTRORM_INTMEM +
235 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid);
237 __storm_memset_struct(bp, addr, size, (u32 *)tcfg);
240 static inline void storm_memset_xstats_flags(struct bnx2x *bp,
241 struct stats_indication_flags *flags,
244 size_t size = sizeof(struct stats_indication_flags);
246 u32 addr = BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(abs_fid);
248 __storm_memset_struct(bp, addr, size, (u32 *)flags);
251 static inline void storm_memset_tstats_flags(struct bnx2x *bp,
252 struct stats_indication_flags *flags,
255 size_t size = sizeof(struct stats_indication_flags);
257 u32 addr = BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(abs_fid);
259 __storm_memset_struct(bp, addr, size, (u32 *)flags);
262 static inline void storm_memset_ustats_flags(struct bnx2x *bp,
263 struct stats_indication_flags *flags,
266 size_t size = sizeof(struct stats_indication_flags);
268 u32 addr = BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(abs_fid);
270 __storm_memset_struct(bp, addr, size, (u32 *)flags);
273 static inline void storm_memset_cstats_flags(struct bnx2x *bp,
274 struct stats_indication_flags *flags,
277 size_t size = sizeof(struct stats_indication_flags);
279 u32 addr = BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(abs_fid);
281 __storm_memset_struct(bp, addr, size, (u32 *)flags);
284 static inline void storm_memset_xstats_addr(struct bnx2x *bp,
285 dma_addr_t mapping, u16 abs_fid)
287 u32 addr = BAR_XSTRORM_INTMEM +
288 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);
290 __storm_memset_dma_mapping(bp, addr, mapping);
293 static inline void storm_memset_tstats_addr(struct bnx2x *bp,
294 dma_addr_t mapping, u16 abs_fid)
296 u32 addr = BAR_TSTRORM_INTMEM +
297 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);
299 __storm_memset_dma_mapping(bp, addr, mapping);
302 static inline void storm_memset_ustats_addr(struct bnx2x *bp,
303 dma_addr_t mapping, u16 abs_fid)
305 u32 addr = BAR_USTRORM_INTMEM +
306 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);
308 __storm_memset_dma_mapping(bp, addr, mapping);
311 static inline void storm_memset_cstats_addr(struct bnx2x *bp,
312 dma_addr_t mapping, u16 abs_fid)
314 u32 addr = BAR_CSTRORM_INTMEM +
315 CSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);
317 __storm_memset_dma_mapping(bp, addr, mapping);
320 static inline void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
323 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
325 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
327 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
329 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
333 static inline void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
336 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
338 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
340 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
342 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
346 static inline void storm_memset_eq_data(struct bnx2x *bp,
347 struct event_ring_data *eq_data,
350 size_t size = sizeof(struct event_ring_data);
352 u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid);
354 __storm_memset_struct(bp, addr, size, (u32 *)eq_data);
357 static inline void storm_memset_eq_prod(struct bnx2x *bp, u16 eq_prod,
360 u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_PROD_OFFSET(pfid);
361 REG_WR16(bp, addr, eq_prod);
364 static inline void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
365 u16 fw_sb_id, u8 sb_index,
369 int index_offset = CHIP_IS_E2(bp) ?
370 offsetof(struct hc_status_block_data_e2, index_data) :
371 offsetof(struct hc_status_block_data_e1x, index_data);
372 u32 addr = BAR_CSTRORM_INTMEM +
373 CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
375 sizeof(struct hc_index_data)*sb_index +
376 offsetof(struct hc_index_data, timeout);
377 REG_WR8(bp, addr, ticks);
378 DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d ticks %d\n",
379 port, fw_sb_id, sb_index, ticks);
381 static inline void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
382 u16 fw_sb_id, u8 sb_index,
385 u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
386 int index_offset = CHIP_IS_E2(bp) ?
387 offsetof(struct hc_status_block_data_e2, index_data) :
388 offsetof(struct hc_status_block_data_e1x, index_data);
389 u32 addr = BAR_CSTRORM_INTMEM +
390 CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
392 sizeof(struct hc_index_data)*sb_index +
393 offsetof(struct hc_index_data, flags);
394 u16 flags = REG_RD16(bp, addr);
396 flags &= ~HC_INDEX_DATA_HC_ENABLED;
397 flags |= enable_flag;
398 REG_WR16(bp, addr, flags);
399 DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d disable %d\n",
400 port, fw_sb_id, sb_index, disable);
404 * locking is done by mcp
406 void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
408 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
409 pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
410 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
411 PCICFG_VENDOR_ID_OFFSET);
414 static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
418 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
419 pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
420 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
421 PCICFG_VENDOR_ID_OFFSET);
426 #define DMAE_DP_SRC_GRC "grc src_addr [%08x]"
427 #define DMAE_DP_SRC_PCI "pci src_addr [%x:%08x]"
428 #define DMAE_DP_DST_GRC "grc dst_addr [%08x]"
429 #define DMAE_DP_DST_PCI "pci dst_addr [%x:%08x]"
430 #define DMAE_DP_DST_NONE "dst_addr [none]"
432 void bnx2x_dp_dmae(struct bnx2x *bp, struct dmae_command *dmae, int msglvl)
434 u32 src_type = dmae->opcode & DMAE_COMMAND_SRC;
436 switch (dmae->opcode & DMAE_COMMAND_DST) {
437 case DMAE_CMD_DST_PCI:
438 if (src_type == DMAE_CMD_SRC_PCI)
439 DP(msglvl, "DMAE: opcode 0x%08x\n"
440 "src [%x:%08x], len [%d*4], dst [%x:%08x]\n"
441 "comp_addr [%x:%08x], comp_val 0x%08x\n",
442 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
443 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
444 dmae->comp_addr_hi, dmae->comp_addr_lo,
447 DP(msglvl, "DMAE: opcode 0x%08x\n"
448 "src [%08x], len [%d*4], dst [%x:%08x]\n"
449 "comp_addr [%x:%08x], comp_val 0x%08x\n",
450 dmae->opcode, dmae->src_addr_lo >> 2,
451 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
452 dmae->comp_addr_hi, dmae->comp_addr_lo,
455 case DMAE_CMD_DST_GRC:
456 if (src_type == DMAE_CMD_SRC_PCI)
457 DP(msglvl, "DMAE: opcode 0x%08x\n"
458 "src [%x:%08x], len [%d*4], dst_addr [%08x]\n"
459 "comp_addr [%x:%08x], comp_val 0x%08x\n",
460 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
461 dmae->len, dmae->dst_addr_lo >> 2,
462 dmae->comp_addr_hi, dmae->comp_addr_lo,
465 DP(msglvl, "DMAE: opcode 0x%08x\n"
466 "src [%08x], len [%d*4], dst [%08x]\n"
467 "comp_addr [%x:%08x], comp_val 0x%08x\n",
468 dmae->opcode, dmae->src_addr_lo >> 2,
469 dmae->len, dmae->dst_addr_lo >> 2,
470 dmae->comp_addr_hi, dmae->comp_addr_lo,
474 if (src_type == DMAE_CMD_SRC_PCI)
475 DP(msglvl, "DMAE: opcode 0x%08x\n"
476 DP_LEVEL "src_addr [%x:%08x] len [%d * 4] "
478 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
479 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
480 dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
483 DP(msglvl, "DMAE: opcode 0x%08x\n"
484 DP_LEVEL "src_addr [%08x] len [%d * 4] "
486 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
487 dmae->opcode, dmae->src_addr_lo >> 2,
488 dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
495 const u32 dmae_reg_go_c[] = {
496 DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
497 DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
498 DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
499 DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
502 /* copy command into DMAE command memory and set DMAE command go */
503 void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
508 cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
509 for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
510 REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
512 DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
513 idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
515 REG_WR(bp, dmae_reg_go_c[idx], 1);
518 u32 bnx2x_dmae_opcode_add_comp(u32 opcode, u8 comp_type)
520 return opcode | ((comp_type << DMAE_COMMAND_C_DST_SHIFT) |
524 u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode)
526 return opcode & ~DMAE_CMD_SRC_RESET;
529 u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
530 bool with_comp, u8 comp_type)
534 opcode |= ((src_type << DMAE_COMMAND_SRC_SHIFT) |
535 (dst_type << DMAE_COMMAND_DST_SHIFT));
537 opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET);
539 opcode |= (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);
540 opcode |= ((BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT) |
541 (BP_E1HVN(bp) << DMAE_COMMAND_DST_VN_SHIFT));
542 opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT);
545 opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP;
547 opcode |= DMAE_CMD_ENDIANITY_DW_SWAP;
550 opcode = bnx2x_dmae_opcode_add_comp(opcode, comp_type);
554 void bnx2x_prep_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae,
555 u8 src_type, u8 dst_type)
557 memset(dmae, 0, sizeof(struct dmae_command));
560 dmae->opcode = bnx2x_dmae_opcode(bp, src_type, dst_type,
561 true, DMAE_COMP_PCI);
563 /* fill in the completion parameters */
564 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
565 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
566 dmae->comp_val = DMAE_COMP_VAL;
569 /* issue a dmae command over the init-channel and wailt for completion */
570 int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae)
572 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
573 int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 40;
576 DP(BNX2X_MSG_OFF, "data before [0x%08x 0x%08x 0x%08x 0x%08x]\n",
577 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
578 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
580 /* lock the dmae channel */
581 mutex_lock(&bp->dmae_mutex);
583 /* reset completion */
586 /* post the command on the channel used for initializations */
587 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
589 /* wait for completion */
591 while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {
592 DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
595 BNX2X_ERR("DMAE timeout!\n");
602 if (*wb_comp & DMAE_PCI_ERR_FLAG) {
603 BNX2X_ERR("DMAE PCI error!\n");
607 DP(BNX2X_MSG_OFF, "data after [0x%08x 0x%08x 0x%08x 0x%08x]\n",
608 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
609 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
612 mutex_unlock(&bp->dmae_mutex);
616 void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
619 struct dmae_command dmae;
621 if (!bp->dmae_ready) {
622 u32 *data = bnx2x_sp(bp, wb_data[0]);
624 DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
625 " using indirect\n", dst_addr, len32);
626 bnx2x_init_ind_wr(bp, dst_addr, data, len32);
630 /* set opcode and fixed command fields */
631 bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);
633 /* fill in addresses and len */
634 dmae.src_addr_lo = U64_LO(dma_addr);
635 dmae.src_addr_hi = U64_HI(dma_addr);
636 dmae.dst_addr_lo = dst_addr >> 2;
637 dmae.dst_addr_hi = 0;
640 bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF);
642 /* issue the command and wait for completion */
643 bnx2x_issue_dmae_with_comp(bp, &dmae);
646 void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
648 struct dmae_command dmae;
650 if (!bp->dmae_ready) {
651 u32 *data = bnx2x_sp(bp, wb_data[0]);
654 DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
655 " using indirect\n", src_addr, len32);
656 for (i = 0; i < len32; i++)
657 data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
661 /* set opcode and fixed command fields */
662 bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI);
664 /* fill in addresses and len */
665 dmae.src_addr_lo = src_addr >> 2;
666 dmae.src_addr_hi = 0;
667 dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
668 dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
671 bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF);
673 /* issue the command and wait for completion */
674 bnx2x_issue_dmae_with_comp(bp, &dmae);
677 void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
680 int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
683 while (len > dmae_wr_max) {
684 bnx2x_write_dmae(bp, phys_addr + offset,
685 addr + offset, dmae_wr_max);
686 offset += dmae_wr_max * 4;
690 bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
693 /* used only for slowpath so not inlined */
694 static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
698 wb_write[0] = val_hi;
699 wb_write[1] = val_lo;
700 REG_WR_DMAE(bp, reg, wb_write, 2);
704 static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
708 REG_RD_DMAE(bp, reg, wb_data, 2);
710 return HILO_U64(wb_data[0], wb_data[1]);
714 static int bnx2x_mc_assert(struct bnx2x *bp)
718 u32 row0, row1, row2, row3;
721 last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
722 XSTORM_ASSERT_LIST_INDEX_OFFSET);
724 BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
726 /* print the asserts */
727 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
729 row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
730 XSTORM_ASSERT_LIST_OFFSET(i));
731 row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
732 XSTORM_ASSERT_LIST_OFFSET(i) + 4);
733 row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
734 XSTORM_ASSERT_LIST_OFFSET(i) + 8);
735 row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
736 XSTORM_ASSERT_LIST_OFFSET(i) + 12);
738 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
739 BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
740 " 0x%08x 0x%08x 0x%08x\n",
741 i, row3, row2, row1, row0);
749 last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
750 TSTORM_ASSERT_LIST_INDEX_OFFSET);
752 BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
754 /* print the asserts */
755 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
757 row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
758 TSTORM_ASSERT_LIST_OFFSET(i));
759 row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
760 TSTORM_ASSERT_LIST_OFFSET(i) + 4);
761 row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
762 TSTORM_ASSERT_LIST_OFFSET(i) + 8);
763 row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
764 TSTORM_ASSERT_LIST_OFFSET(i) + 12);
766 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
767 BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
768 " 0x%08x 0x%08x 0x%08x\n",
769 i, row3, row2, row1, row0);
777 last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
778 CSTORM_ASSERT_LIST_INDEX_OFFSET);
780 BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
782 /* print the asserts */
783 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
785 row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
786 CSTORM_ASSERT_LIST_OFFSET(i));
787 row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
788 CSTORM_ASSERT_LIST_OFFSET(i) + 4);
789 row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
790 CSTORM_ASSERT_LIST_OFFSET(i) + 8);
791 row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
792 CSTORM_ASSERT_LIST_OFFSET(i) + 12);
794 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
795 BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
796 " 0x%08x 0x%08x 0x%08x\n",
797 i, row3, row2, row1, row0);
805 last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
806 USTORM_ASSERT_LIST_INDEX_OFFSET);
808 BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
810 /* print the asserts */
811 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
813 row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
814 USTORM_ASSERT_LIST_OFFSET(i));
815 row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
816 USTORM_ASSERT_LIST_OFFSET(i) + 4);
817 row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
818 USTORM_ASSERT_LIST_OFFSET(i) + 8);
819 row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
820 USTORM_ASSERT_LIST_OFFSET(i) + 12);
822 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
823 BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
824 " 0x%08x 0x%08x 0x%08x\n",
825 i, row3, row2, row1, row0);
835 static void bnx2x_fw_dump(struct bnx2x *bp)
841 u32 trace_shmem_base;
843 BNX2X_ERR("NO MCP - can not dump\n");
847 if (BP_PATH(bp) == 0)
848 trace_shmem_base = bp->common.shmem_base;
850 trace_shmem_base = SHMEM2_RD(bp, other_shmem_base_addr);
851 addr = trace_shmem_base - 0x0800 + 4;
852 mark = REG_RD(bp, addr);
853 mark = (CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH)
854 + ((mark + 0x3) & ~0x3) - 0x08000000;
855 pr_err("begin fw dump (mark 0x%x)\n", mark);
858 for (offset = mark; offset <= trace_shmem_base; offset += 0x8*4) {
859 for (word = 0; word < 8; word++)
860 data[word] = htonl(REG_RD(bp, offset + 4*word));
862 pr_cont("%s", (char *)data);
864 for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
865 for (word = 0; word < 8; word++)
866 data[word] = htonl(REG_RD(bp, offset + 4*word));
868 pr_cont("%s", (char *)data);
870 pr_err("end of fw dump\n");
873 void bnx2x_panic_dump(struct bnx2x *bp)
877 struct hc_sp_status_block_data sp_sb_data;
878 int func = BP_FUNC(bp);
879 #ifdef BNX2X_STOP_ON_ERROR
880 u16 start = 0, end = 0;
883 bp->stats_state = STATS_STATE_DISABLED;
884 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
886 BNX2X_ERR("begin crash dump -----------------\n");
890 BNX2X_ERR("def_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x)"
891 " spq_prod_idx(0x%x)\n",
892 bp->def_idx, bp->def_att_idx,
893 bp->attn_state, bp->spq_prod_idx);
894 BNX2X_ERR("DSB: attn bits(0x%x) ack(0x%x) id(0x%x) idx(0x%x)\n",
895 bp->def_status_blk->atten_status_block.attn_bits,
896 bp->def_status_blk->atten_status_block.attn_bits_ack,
897 bp->def_status_blk->atten_status_block.status_block_id,
898 bp->def_status_blk->atten_status_block.attn_bits_index);
900 for (i = 0; i < HC_SP_SB_MAX_INDICES; i++)
902 bp->def_status_blk->sp_sb.index_values[i],
903 (i == HC_SP_SB_MAX_INDICES - 1) ? ") " : " ");
905 for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
906 *((u32 *)&sp_sb_data + i) = REG_RD(bp, BAR_CSTRORM_INTMEM +
907 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
910 pr_cont("igu_sb_id(0x%x) igu_seg_id (0x%x) "
911 "pf_id(0x%x) vnic_id(0x%x) "
912 "vf_id(0x%x) vf_valid (0x%x)\n",
913 sp_sb_data.igu_sb_id,
914 sp_sb_data.igu_seg_id,
915 sp_sb_data.p_func.pf_id,
916 sp_sb_data.p_func.vnic_id,
917 sp_sb_data.p_func.vf_id,
918 sp_sb_data.p_func.vf_valid);
921 for_each_queue(bp, i) {
922 struct bnx2x_fastpath *fp = &bp->fp[i];
924 struct hc_status_block_data_e2 sb_data_e2;
925 struct hc_status_block_data_e1x sb_data_e1x;
926 struct hc_status_block_sm *hc_sm_p =
928 sb_data_e2.common.state_machine :
929 sb_data_e1x.common.state_machine;
930 struct hc_index_data *hc_index_p =
932 sb_data_e2.index_data :
933 sb_data_e1x.index_data;
938 BNX2X_ERR("fp%d: rx_bd_prod(0x%x) rx_bd_cons(0x%x)"
939 " rx_comp_prod(0x%x)"
940 " rx_comp_cons(0x%x) *rx_cons_sb(0x%x)\n",
941 i, fp->rx_bd_prod, fp->rx_bd_cons,
943 fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
944 BNX2X_ERR(" rx_sge_prod(0x%x) last_max_sge(0x%x)"
945 " fp_hc_idx(0x%x)\n",
946 fp->rx_sge_prod, fp->last_max_sge,
947 le16_to_cpu(fp->fp_hc_idx));
950 BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x)"
951 " tx_bd_prod(0x%x) tx_bd_cons(0x%x)"
952 " *tx_cons_sb(0x%x)\n",
953 i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
954 fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
956 loop = CHIP_IS_E2(bp) ?
957 HC_SB_MAX_INDICES_E2 : HC_SB_MAX_INDICES_E1X;
961 BNX2X_ERR(" run indexes (");
962 for (j = 0; j < HC_SB_MAX_SM; j++)
964 fp->sb_running_index[j],
965 (j == HC_SB_MAX_SM - 1) ? ")" : " ");
967 BNX2X_ERR(" indexes (");
968 for (j = 0; j < loop; j++)
970 fp->sb_index_values[j],
971 (j == loop - 1) ? ")" : " ");
973 data_size = CHIP_IS_E2(bp) ?
974 sizeof(struct hc_status_block_data_e2) :
975 sizeof(struct hc_status_block_data_e1x);
976 data_size /= sizeof(u32);
977 sb_data_p = CHIP_IS_E2(bp) ?
980 /* copy sb data in here */
981 for (j = 0; j < data_size; j++)
982 *(sb_data_p + j) = REG_RD(bp, BAR_CSTRORM_INTMEM +
983 CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id) +
986 if (CHIP_IS_E2(bp)) {
987 pr_cont("pf_id(0x%x) vf_id (0x%x) vf_valid(0x%x) "
988 "vnic_id(0x%x) same_igu_sb_1b(0x%x)\n",
989 sb_data_e2.common.p_func.pf_id,
990 sb_data_e2.common.p_func.vf_id,
991 sb_data_e2.common.p_func.vf_valid,
992 sb_data_e2.common.p_func.vnic_id,
993 sb_data_e2.common.same_igu_sb_1b);
995 pr_cont("pf_id(0x%x) vf_id (0x%x) vf_valid(0x%x) "
996 "vnic_id(0x%x) same_igu_sb_1b(0x%x)\n",
997 sb_data_e1x.common.p_func.pf_id,
998 sb_data_e1x.common.p_func.vf_id,
999 sb_data_e1x.common.p_func.vf_valid,
1000 sb_data_e1x.common.p_func.vnic_id,
1001 sb_data_e1x.common.same_igu_sb_1b);
1005 for (j = 0; j < HC_SB_MAX_SM; j++) {
1006 pr_cont("SM[%d] __flags (0x%x) "
1007 "igu_sb_id (0x%x) igu_seg_id(0x%x) "
1008 "time_to_expire (0x%x) "
1009 "timer_value(0x%x)\n", j,
1011 hc_sm_p[j].igu_sb_id,
1012 hc_sm_p[j].igu_seg_id,
1013 hc_sm_p[j].time_to_expire,
1014 hc_sm_p[j].timer_value);
1018 for (j = 0; j < loop; j++) {
1019 pr_cont("INDEX[%d] flags (0x%x) "
1020 "timeout (0x%x)\n", j,
1021 hc_index_p[j].flags,
1022 hc_index_p[j].timeout);
1026 #ifdef BNX2X_STOP_ON_ERROR
1029 for_each_queue(bp, i) {
1030 struct bnx2x_fastpath *fp = &bp->fp[i];
1032 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
1033 end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
1034 for (j = start; j != end; j = RX_BD(j + 1)) {
1035 u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
1036 struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
1038 BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
1039 i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
1042 start = RX_SGE(fp->rx_sge_prod);
1043 end = RX_SGE(fp->last_max_sge);
1044 for (j = start; j != end; j = RX_SGE(j + 1)) {
1045 u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
1046 struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
1048 BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
1049 i, j, rx_sge[1], rx_sge[0], sw_page->page);
1052 start = RCQ_BD(fp->rx_comp_cons - 10);
1053 end = RCQ_BD(fp->rx_comp_cons + 503);
1054 for (j = start; j != end; j = RCQ_BD(j + 1)) {
1055 u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
1057 BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
1058 i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
1063 for_each_queue(bp, i) {
1064 struct bnx2x_fastpath *fp = &bp->fp[i];
1066 start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
1067 end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
1068 for (j = start; j != end; j = TX_BD(j + 1)) {
1069 struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];
1071 BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
1072 i, j, sw_bd->skb, sw_bd->first_bd);
1075 start = TX_BD(fp->tx_bd_cons - 10);
1076 end = TX_BD(fp->tx_bd_cons + 254);
1077 for (j = start; j != end; j = TX_BD(j + 1)) {
1078 u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];
1080 BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
1081 i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
1086 bnx2x_mc_assert(bp);
1087 BNX2X_ERR("end crash dump -----------------\n");
1090 static void bnx2x_hc_int_enable(struct bnx2x *bp)
1092 int port = BP_PORT(bp);
1093 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
1094 u32 val = REG_RD(bp, addr);
1095 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
1096 int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;
1099 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
1100 HC_CONFIG_0_REG_INT_LINE_EN_0);
1101 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
1102 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
1104 val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
1105 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
1106 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
1107 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
1109 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
1110 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
1111 HC_CONFIG_0_REG_INT_LINE_EN_0 |
1112 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
1114 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
1117 REG_WR(bp, addr, val);
1119 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
1122 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
1123 val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
1125 REG_WR(bp, addr, val);
1127 * Ensure that HC_CONFIG is written before leading/trailing edge config
1132 if (!CHIP_IS_E1(bp)) {
1133 /* init leading/trailing edge */
1135 val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
1137 /* enable nig and gpio3 attention */
1142 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
1143 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
1146 /* Make sure that interrupts are indeed enabled from here on */
1150 static void bnx2x_igu_int_enable(struct bnx2x *bp)
1153 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
1154 int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;
1156 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
1159 val &= ~(IGU_PF_CONF_INT_LINE_EN |
1160 IGU_PF_CONF_SINGLE_ISR_EN);
1161 val |= (IGU_PF_CONF_FUNC_EN |
1162 IGU_PF_CONF_MSI_MSIX_EN |
1163 IGU_PF_CONF_ATTN_BIT_EN);
1165 val &= ~IGU_PF_CONF_INT_LINE_EN;
1166 val |= (IGU_PF_CONF_FUNC_EN |
1167 IGU_PF_CONF_MSI_MSIX_EN |
1168 IGU_PF_CONF_ATTN_BIT_EN |
1169 IGU_PF_CONF_SINGLE_ISR_EN);
1171 val &= ~IGU_PF_CONF_MSI_MSIX_EN;
1172 val |= (IGU_PF_CONF_FUNC_EN |
1173 IGU_PF_CONF_INT_LINE_EN |
1174 IGU_PF_CONF_ATTN_BIT_EN |
1175 IGU_PF_CONF_SINGLE_ISR_EN);
1178 DP(NETIF_MSG_INTR, "write 0x%x to IGU mode %s\n",
1179 val, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
1181 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
1185 /* init leading/trailing edge */
1187 val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
1189 /* enable nig and gpio3 attention */
1194 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
1195 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
1197 /* Make sure that interrupts are indeed enabled from here on */
1201 void bnx2x_int_enable(struct bnx2x *bp)
1203 if (bp->common.int_block == INT_BLOCK_HC)
1204 bnx2x_hc_int_enable(bp);
1206 bnx2x_igu_int_enable(bp);
1209 static void bnx2x_hc_int_disable(struct bnx2x *bp)
1211 int port = BP_PORT(bp);
1212 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
1213 u32 val = REG_RD(bp, addr);
1215 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
1216 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
1217 HC_CONFIG_0_REG_INT_LINE_EN_0 |
1218 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
1220 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
1223 /* flush all outstanding writes */
1226 REG_WR(bp, addr, val);
1227 if (REG_RD(bp, addr) != val)
1228 BNX2X_ERR("BUG! proper val not read from IGU!\n");
1231 static void bnx2x_igu_int_disable(struct bnx2x *bp)
1233 u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
1235 val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
1236 IGU_PF_CONF_INT_LINE_EN |
1237 IGU_PF_CONF_ATTN_BIT_EN);
1239 DP(NETIF_MSG_INTR, "write %x to IGU\n", val);
1241 /* flush all outstanding writes */
1244 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
1245 if (REG_RD(bp, IGU_REG_PF_CONFIGURATION) != val)
1246 BNX2X_ERR("BUG! proper val not read from IGU!\n");
1249 void bnx2x_int_disable(struct bnx2x *bp)
1251 if (bp->common.int_block == INT_BLOCK_HC)
1252 bnx2x_hc_int_disable(bp);
1254 bnx2x_igu_int_disable(bp);
1257 void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
1259 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
1262 /* disable interrupt handling */
1263 atomic_inc(&bp->intr_sem);
1264 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
1267 /* prevent the HW from sending interrupts */
1268 bnx2x_int_disable(bp);
1270 /* make sure all ISRs are done */
1272 synchronize_irq(bp->msix_table[0].vector);
1277 for_each_queue(bp, i)
1278 synchronize_irq(bp->msix_table[i + offset].vector);
1280 synchronize_irq(bp->pdev->irq);
1282 /* make sure sp_task is not running */
1283 cancel_delayed_work(&bp->sp_task);
1284 flush_workqueue(bnx2x_wq);
1290 * General service functions
1293 /* Return true if succeeded to acquire the lock */
1294 static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
1297 u32 resource_bit = (1 << resource);
1298 int func = BP_FUNC(bp);
1299 u32 hw_lock_control_reg;
1301 DP(NETIF_MSG_HW, "Trying to take a lock on resource %d\n", resource);
1303 /* Validating that the resource is within range */
1304 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1306 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1307 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1312 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1314 hw_lock_control_reg =
1315 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1317 /* Try to acquire the lock */
1318 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1319 lock_status = REG_RD(bp, hw_lock_control_reg);
1320 if (lock_status & resource_bit)
1323 DP(NETIF_MSG_HW, "Failed to get a lock on resource %d\n", resource);
1328 static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
1331 void bnx2x_sp_event(struct bnx2x_fastpath *fp,
1332 union eth_rx_cqe *rr_cqe)
1334 struct bnx2x *bp = fp->bp;
1335 int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
1336 int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
1339 "fp %d cid %d got ramrod #%d state is %x type is %d\n",
1340 fp->index, cid, command, bp->state,
1341 rr_cqe->ramrod_cqe.ramrod_type);
1343 switch (command | fp->state) {
1344 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP | BNX2X_FP_STATE_OPENING):
1345 DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n", cid);
1346 fp->state = BNX2X_FP_STATE_OPEN;
1349 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
1350 DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n", cid);
1351 fp->state = BNX2X_FP_STATE_HALTED;
1354 case (RAMROD_CMD_ID_ETH_TERMINATE | BNX2X_FP_STATE_TERMINATING):
1355 DP(NETIF_MSG_IFDOWN, "got MULTI[%d] teminate ramrod\n", cid);
1356 fp->state = BNX2X_FP_STATE_TERMINATED;
1360 BNX2X_ERR("unexpected MC reply (%d) "
1361 "fp[%d] state is %x\n",
1362 command, fp->index, fp->state);
1366 smp_mb__before_atomic_inc();
1367 atomic_inc(&bp->spq_left);
1368 /* push the change in fp->state and towards the memory */
1374 irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1376 struct bnx2x *bp = netdev_priv(dev_instance);
1377 u16 status = bnx2x_ack_int(bp);
1381 /* Return here if interrupt is shared and it's not for us */
1382 if (unlikely(status == 0)) {
1383 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1386 DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);
1388 /* Return here if interrupt is disabled */
1389 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1390 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1394 #ifdef BNX2X_STOP_ON_ERROR
1395 if (unlikely(bp->panic))
1399 for_each_queue(bp, i) {
1400 struct bnx2x_fastpath *fp = &bp->fp[i];
1402 mask = 0x2 << (fp->index + CNIC_CONTEXT_USE);
1403 if (status & mask) {
1404 /* Handle Rx and Tx according to SB id */
1405 prefetch(fp->rx_cons_sb);
1406 prefetch(fp->tx_cons_sb);
1407 prefetch(&fp->sb_running_index[SM_RX_ID]);
1408 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1415 if (status & (mask | 0x1)) {
1416 struct cnic_ops *c_ops = NULL;
1419 c_ops = rcu_dereference(bp->cnic_ops);
1421 c_ops->cnic_handler(bp->cnic_data, NULL);
1428 if (unlikely(status & 0x1)) {
1429 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
1436 if (unlikely(status))
1437 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
1443 /* end of fast path */
1449 * General service functions
1452 int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1455 u32 resource_bit = (1 << resource);
1456 int func = BP_FUNC(bp);
1457 u32 hw_lock_control_reg;
1460 /* Validating that the resource is within range */
1461 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1463 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1464 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1469 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1471 hw_lock_control_reg =
1472 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1475 /* Validating that the resource is not already taken */
1476 lock_status = REG_RD(bp, hw_lock_control_reg);
1477 if (lock_status & resource_bit) {
1478 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1479 lock_status, resource_bit);
1483 /* Try for 5 second every 5ms */
1484 for (cnt = 0; cnt < 1000; cnt++) {
1485 /* Try to acquire the lock */
1486 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1487 lock_status = REG_RD(bp, hw_lock_control_reg);
1488 if (lock_status & resource_bit)
1493 DP(NETIF_MSG_HW, "Timeout\n");
1497 int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1500 u32 resource_bit = (1 << resource);
1501 int func = BP_FUNC(bp);
1502 u32 hw_lock_control_reg;
1504 DP(NETIF_MSG_HW, "Releasing a lock on resource %d\n", resource);
1506 /* Validating that the resource is within range */
1507 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1509 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1510 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1515 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1517 hw_lock_control_reg =
1518 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1521 /* Validating that the resource is currently taken */
1522 lock_status = REG_RD(bp, hw_lock_control_reg);
1523 if (!(lock_status & resource_bit)) {
1524 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1525 lock_status, resource_bit);
1529 REG_WR(bp, hw_lock_control_reg, resource_bit);
1534 int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1536 /* The GPIO should be swapped if swap register is set and active */
1537 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1538 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1539 int gpio_shift = gpio_num +
1540 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1541 u32 gpio_mask = (1 << gpio_shift);
1545 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1546 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1550 /* read GPIO value */
1551 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1553 /* get the requested pin value */
1554 if ((gpio_reg & gpio_mask) == gpio_mask)
1559 DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);
1564 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1566 /* The GPIO should be swapped if swap register is set and active */
1567 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1568 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1569 int gpio_shift = gpio_num +
1570 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1571 u32 gpio_mask = (1 << gpio_shift);
1574 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1575 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1579 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1580 /* read GPIO and mask except the float bits */
1581 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1584 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1585 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1586 gpio_num, gpio_shift);
1587 /* clear FLOAT and set CLR */
1588 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1589 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1592 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1593 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1594 gpio_num, gpio_shift);
1595 /* clear FLOAT and set SET */
1596 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1597 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1600 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
1601 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1602 gpio_num, gpio_shift);
1604 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1611 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
1612 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1617 int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1619 /* The GPIO should be swapped if swap register is set and active */
1620 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1621 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1622 int gpio_shift = gpio_num +
1623 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1624 u32 gpio_mask = (1 << gpio_shift);
1627 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1628 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1632 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1634 gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
1637 case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
1638 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
1639 "output low\n", gpio_num, gpio_shift);
1640 /* clear SET and set CLR */
1641 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1642 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1645 case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
1646 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
1647 "output high\n", gpio_num, gpio_shift);
1648 /* clear CLR and set SET */
1649 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1650 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1657 REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
1658 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1663 static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
1665 u32 spio_mask = (1 << spio_num);
1668 if ((spio_num < MISC_REGISTERS_SPIO_4) ||
1669 (spio_num > MISC_REGISTERS_SPIO_7)) {
1670 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
1674 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1675 /* read SPIO and mask except the float bits */
1676 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
1679 case MISC_REGISTERS_SPIO_OUTPUT_LOW:
1680 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
1681 /* clear FLOAT and set CLR */
1682 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1683 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
1686 case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
1687 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
1688 /* clear FLOAT and set SET */
1689 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1690 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
1693 case MISC_REGISTERS_SPIO_INPUT_HI_Z:
1694 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
1696 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1703 REG_WR(bp, MISC_REG_SPIO, spio_reg);
1704 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1709 int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
1711 u32 sel_phy_idx = 0;
1712 if (bp->link_vars.link_up) {
1713 sel_phy_idx = EXT_PHY1;
1714 /* In case link is SERDES, check if the EXT_PHY2 is the one */
1715 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
1716 (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
1717 sel_phy_idx = EXT_PHY2;
1720 switch (bnx2x_phy_selection(&bp->link_params)) {
1721 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
1722 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
1723 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
1724 sel_phy_idx = EXT_PHY1;
1726 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
1727 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
1728 sel_phy_idx = EXT_PHY2;
1733 * The selected actived PHY is always after swapping (in case PHY
1734 * swapping is enabled). So when swapping is enabled, we need to reverse
1738 if (bp->link_params.multi_phy_config &
1739 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
1740 if (sel_phy_idx == EXT_PHY1)
1741 sel_phy_idx = EXT_PHY2;
1742 else if (sel_phy_idx == EXT_PHY2)
1743 sel_phy_idx = EXT_PHY1;
1745 return LINK_CONFIG_IDX(sel_phy_idx);
1748 void bnx2x_calc_fc_adv(struct bnx2x *bp)
1750 u8 cfg_idx = bnx2x_get_link_cfg_idx(bp);
1751 switch (bp->link_vars.ieee_fc &
1752 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
1753 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
1754 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
1758 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
1759 bp->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause |
1763 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
1764 bp->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause;
1768 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
1774 u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
1776 if (!BP_NOMCP(bp)) {
1778 int cfx_idx = bnx2x_get_link_cfg_idx(bp);
1779 u16 req_line_speed = bp->link_params.req_line_speed[cfx_idx];
1780 /* Initialize link parameters structure variables */
1781 /* It is recommended to turn off RX FC for jumbo frames
1782 for better performance */
1783 if ((CHIP_IS_E1x(bp)) && (bp->dev->mtu > 5000))
1784 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
1786 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
1788 bnx2x_acquire_phy_lock(bp);
1790 if (load_mode == LOAD_DIAG) {
1791 bp->link_params.loopback_mode = LOOPBACK_XGXS;
1792 bp->link_params.req_line_speed[cfx_idx] = SPEED_10000;
1795 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
1797 bnx2x_release_phy_lock(bp);
1799 bnx2x_calc_fc_adv(bp);
1801 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
1802 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
1803 bnx2x_link_report(bp);
1805 bp->link_params.req_line_speed[cfx_idx] = req_line_speed;
1808 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
1812 void bnx2x_link_set(struct bnx2x *bp)
1814 if (!BP_NOMCP(bp)) {
1815 bnx2x_acquire_phy_lock(bp);
1816 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
1817 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
1818 bnx2x_release_phy_lock(bp);
1820 bnx2x_calc_fc_adv(bp);
1822 BNX2X_ERR("Bootcode is missing - can not set link\n");
1825 static void bnx2x__link_reset(struct bnx2x *bp)
1827 if (!BP_NOMCP(bp)) {
1828 bnx2x_acquire_phy_lock(bp);
1829 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
1830 bnx2x_release_phy_lock(bp);
1832 BNX2X_ERR("Bootcode is missing - can not reset link\n");
1835 u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes)
1839 if (!BP_NOMCP(bp)) {
1840 bnx2x_acquire_phy_lock(bp);
1841 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars,
1843 bnx2x_release_phy_lock(bp);
1845 BNX2X_ERR("Bootcode is missing - can not test link\n");
1850 static void bnx2x_init_port_minmax(struct bnx2x *bp)
1852 u32 r_param = bp->link_vars.line_speed / 8;
1853 u32 fair_periodic_timeout_usec;
1856 memset(&(bp->cmng.rs_vars), 0,
1857 sizeof(struct rate_shaping_vars_per_port));
1858 memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
1860 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
1861 bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
1863 /* this is the threshold below which no timer arming will occur
1864 1.25 coefficient is for the threshold to be a little bigger
1865 than the real time, to compensate for timer in-accuracy */
1866 bp->cmng.rs_vars.rs_threshold =
1867 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
1869 /* resolution of fairness timer */
1870 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
1871 /* for 10G it is 1000usec. for 1G it is 10000usec. */
1872 t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
1874 /* this is the threshold below which we won't arm the timer anymore */
1875 bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
1877 /* we multiply by 1e3/8 to get bytes/msec.
1878 We don't want the credits to pass a credit
1879 of the t_fair*FAIR_MEM (algorithm resolution) */
1880 bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
1881 /* since each tick is 4 usec */
1882 bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
1885 /* Calculates the sum of vn_min_rates.
1886 It's needed for further normalizing of the min_rates.
1888 sum of vn_min_rates.
1890 0 - if all the min_rates are 0.
1891 In the later case fainess algorithm should be deactivated.
1892 If not all min_rates are zero then those that are zeroes will be set to 1.
1894 static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
1899 bp->vn_weight_sum = 0;
1900 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
1901 u32 vn_cfg = bp->mf_config[vn];
1902 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
1903 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
1905 /* Skip hidden vns */
1906 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
1909 /* If min rate is zero - set it to 1 */
1911 vn_min_rate = DEF_MIN_RATE;
1915 bp->vn_weight_sum += vn_min_rate;
1918 /* ... only if all min rates are zeros - disable fairness */
1920 bp->cmng.flags.cmng_enables &=
1921 ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
1922 DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
1923 " fairness will be disabled\n");
1925 bp->cmng.flags.cmng_enables |=
1926 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
1929 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn)
1931 struct rate_shaping_vars_per_vn m_rs_vn;
1932 struct fairness_vars_per_vn m_fair_vn;
1933 u32 vn_cfg = bp->mf_config[vn];
1934 int func = 2*vn + BP_PORT(bp);
1935 u16 vn_min_rate, vn_max_rate;
1938 /* If function is hidden - set min and max to zeroes */
1939 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
1944 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
1945 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
1946 /* If min rate is zero - set it to 1 */
1947 if (bp->vn_weight_sum && (vn_min_rate == 0))
1948 vn_min_rate = DEF_MIN_RATE;
1949 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
1950 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
1954 "func %d: vn_min_rate %d vn_max_rate %d vn_weight_sum %d\n",
1955 func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
1957 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
1958 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
1960 /* global vn counter - maximal Mbps for this vn */
1961 m_rs_vn.vn_counter.rate = vn_max_rate;
1963 /* quota - number of bytes transmitted in this period */
1964 m_rs_vn.vn_counter.quota =
1965 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
1967 if (bp->vn_weight_sum) {
1968 /* credit for each period of the fairness algorithm:
1969 number of bytes in T_FAIR (the vn share the port rate).
1970 vn_weight_sum should not be larger than 10000, thus
1971 T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
1973 m_fair_vn.vn_credit_delta =
1974 max_t(u32, (vn_min_rate * (T_FAIR_COEF /
1975 (8 * bp->vn_weight_sum))),
1976 (bp->cmng.fair_vars.fair_threshold * 2));
1977 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
1978 m_fair_vn.vn_credit_delta);
1981 /* Store it to internal memory */
1982 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
1983 REG_WR(bp, BAR_XSTRORM_INTMEM +
1984 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
1985 ((u32 *)(&m_rs_vn))[i]);
1987 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
1988 REG_WR(bp, BAR_XSTRORM_INTMEM +
1989 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
1990 ((u32 *)(&m_fair_vn))[i]);
1993 static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp)
1995 if (CHIP_REV_IS_SLOW(bp))
1996 return CMNG_FNS_NONE;
1998 return CMNG_FNS_MINMAX;
2000 return CMNG_FNS_NONE;
2003 static void bnx2x_read_mf_cfg(struct bnx2x *bp)
2008 return; /* what should be the default bvalue in this case */
2010 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2011 int /*abs*/func = 2*vn + BP_PORT(bp);
2013 MF_CFG_RD(bp, func_mf_config[func].config);
2017 static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
2020 if (cmng_type == CMNG_FNS_MINMAX) {
2023 /* clear cmng_enables */
2024 bp->cmng.flags.cmng_enables = 0;
2026 /* read mf conf from shmem */
2028 bnx2x_read_mf_cfg(bp);
2030 /* Init rate shaping and fairness contexts */
2031 bnx2x_init_port_minmax(bp);
2033 /* vn_weight_sum and enable fairness if not 0 */
2034 bnx2x_calc_vn_weight_sum(bp);
2036 /* calculate and set min-max rate for each vn */
2037 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2038 bnx2x_init_vn_minmax(bp, vn);
2040 /* always enable rate shaping and fairness */
2041 bp->cmng.flags.cmng_enables |=
2042 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
2043 if (!bp->vn_weight_sum)
2044 DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
2045 " fairness will be disabled\n");
2049 /* rate shaping and fairness are disabled */
2051 "rate shaping and fairness are disabled\n");
2054 static inline void bnx2x_link_sync_notify(struct bnx2x *bp)
2056 int port = BP_PORT(bp);
2060 /* Set the attention towards other drivers on the same port */
2061 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2062 if (vn == BP_E1HVN(bp))
2065 func = ((vn << 1) | port);
2066 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2067 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2071 /* This function is called upon link interrupt */
2072 static void bnx2x_link_attn(struct bnx2x *bp)
2074 u32 prev_link_status = bp->link_vars.link_status;
2075 /* Make sure that we are synced with the current statistics */
2076 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2078 bnx2x_link_update(&bp->link_params, &bp->link_vars);
2080 if (bp->link_vars.link_up) {
2082 /* dropless flow control */
2083 if (!CHIP_IS_E1(bp) && bp->dropless_fc) {
2084 int port = BP_PORT(bp);
2085 u32 pause_enabled = 0;
2087 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2090 REG_WR(bp, BAR_USTRORM_INTMEM +
2091 USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
2095 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2096 struct host_port_stats *pstats;
2098 pstats = bnx2x_sp(bp, port_stats);
2099 /* reset old bmac stats */
2100 memset(&(pstats->mac_stx[0]), 0,
2101 sizeof(struct mac_stx));
2103 if (bp->state == BNX2X_STATE_OPEN)
2104 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2107 /* indicate link status only if link status actually changed */
2108 if (prev_link_status != bp->link_vars.link_status)
2109 bnx2x_link_report(bp);
2112 bnx2x_link_sync_notify(bp);
2114 if (bp->link_vars.link_up && bp->link_vars.line_speed) {
2115 int cmng_fns = bnx2x_get_cmng_fns_mode(bp);
2117 if (cmng_fns != CMNG_FNS_NONE) {
2118 bnx2x_cmng_fns_init(bp, false, cmng_fns);
2119 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
2121 /* rate shaping and fairness are disabled */
2123 "single function mode without fairness\n");
2127 void bnx2x__link_status_update(struct bnx2x *bp)
2129 if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
2132 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2134 if (bp->link_vars.link_up)
2135 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2137 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2139 /* the link status update could be the result of a DCC event
2140 hence re-read the shmem mf configuration */
2141 bnx2x_read_mf_cfg(bp);
2143 /* indicate link status */
2144 bnx2x_link_report(bp);
2147 static void bnx2x_pmf_update(struct bnx2x *bp)
2149 int port = BP_PORT(bp);
2153 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2155 /* enable nig attention */
2156 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2157 if (bp->common.int_block == INT_BLOCK_HC) {
2158 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2159 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2160 } else if (CHIP_IS_E2(bp)) {
2161 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
2162 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
2165 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2173 * General service functions
2176 /* send the MCP a request, block until there is a reply */
2177 u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
2179 int mb_idx = BP_FW_MB_IDX(bp);
2180 u32 seq = ++bp->fw_seq;
2183 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
2185 mutex_lock(&bp->fw_mb_mutex);
2186 SHMEM_WR(bp, func_mb[mb_idx].drv_mb_param, param);
2187 SHMEM_WR(bp, func_mb[mb_idx].drv_mb_header, (command | seq));
2189 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
2192 /* let the FW do it's magic ... */
2195 rc = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_header);
2197 /* Give the FW up to 5 second (500*10ms) */
2198 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
2200 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
2201 cnt*delay, rc, seq);
2203 /* is this a reply to our command? */
2204 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
2205 rc &= FW_MSG_CODE_MASK;
2208 BNX2X_ERR("FW failed to respond!\n");
2212 mutex_unlock(&bp->fw_mb_mutex);
2217 /* must be called under rtnl_lock */
2218 void bnx2x_rxq_set_mac_filters(struct bnx2x *bp, u16 cl_id, u32 filters)
2220 u32 mask = (1 << cl_id);
2222 /* initial seeting is BNX2X_ACCEPT_NONE */
2223 u8 drop_all_ucast = 1, drop_all_bcast = 1, drop_all_mcast = 1;
2224 u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
2225 u8 unmatched_unicast = 0;
2227 if (filters & BNX2X_PROMISCUOUS_MODE) {
2228 /* promiscious - accept all, drop none */
2229 drop_all_ucast = drop_all_bcast = drop_all_mcast = 0;
2230 accp_all_ucast = accp_all_bcast = accp_all_mcast = 1;
2232 if (filters & BNX2X_ACCEPT_UNICAST) {
2233 /* accept matched ucast */
2236 if (filters & BNX2X_ACCEPT_MULTICAST) {
2237 /* accept matched mcast */
2240 if (filters & BNX2X_ACCEPT_ALL_UNICAST) {
2241 /* accept all mcast */
2245 if (filters & BNX2X_ACCEPT_ALL_MULTICAST) {
2246 /* accept all mcast */
2250 if (filters & BNX2X_ACCEPT_BROADCAST) {
2251 /* accept (all) bcast */
2256 bp->mac_filters.ucast_drop_all = drop_all_ucast ?
2257 bp->mac_filters.ucast_drop_all | mask :
2258 bp->mac_filters.ucast_drop_all & ~mask;
2260 bp->mac_filters.mcast_drop_all = drop_all_mcast ?
2261 bp->mac_filters.mcast_drop_all | mask :
2262 bp->mac_filters.mcast_drop_all & ~mask;
2264 bp->mac_filters.bcast_drop_all = drop_all_bcast ?
2265 bp->mac_filters.bcast_drop_all | mask :
2266 bp->mac_filters.bcast_drop_all & ~mask;
2268 bp->mac_filters.ucast_accept_all = accp_all_ucast ?
2269 bp->mac_filters.ucast_accept_all | mask :
2270 bp->mac_filters.ucast_accept_all & ~mask;
2272 bp->mac_filters.mcast_accept_all = accp_all_mcast ?
2273 bp->mac_filters.mcast_accept_all | mask :
2274 bp->mac_filters.mcast_accept_all & ~mask;
2276 bp->mac_filters.bcast_accept_all = accp_all_bcast ?
2277 bp->mac_filters.bcast_accept_all | mask :
2278 bp->mac_filters.bcast_accept_all & ~mask;
2280 bp->mac_filters.unmatched_unicast = unmatched_unicast ?
2281 bp->mac_filters.unmatched_unicast | mask :
2282 bp->mac_filters.unmatched_unicast & ~mask;
2285 void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
2287 if (FUNC_CONFIG(p->func_flgs)) {
2288 struct tstorm_eth_function_common_config tcfg = {0};
2291 if (p->func_flgs & FUNC_FLG_TPA)
2292 tcfg.config_flags |=
2293 TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
2296 if (p->func_flgs & FUNC_FLG_RSS) {
2297 u16 rss_flgs = (p->rss->mode <<
2298 TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT);
2300 if (p->rss->cap & RSS_IPV4_CAP)
2301 rss_flgs |= RSS_IPV4_CAP_MASK;
2302 if (p->rss->cap & RSS_IPV4_TCP_CAP)
2303 rss_flgs |= RSS_IPV4_TCP_CAP_MASK;
2304 if (p->rss->cap & RSS_IPV6_CAP)
2305 rss_flgs |= RSS_IPV6_CAP_MASK;
2306 if (p->rss->cap & RSS_IPV6_TCP_CAP)
2307 rss_flgs |= RSS_IPV6_TCP_CAP_MASK;
2309 tcfg.config_flags |= rss_flgs;
2310 tcfg.rss_result_mask = p->rss->result_mask;
2314 storm_memset_func_cfg(bp, &tcfg, p->func_id);
2317 /* Enable the function in the FW */
2318 storm_memset_vf_to_pf(bp, p->func_id, p->pf_id);
2319 storm_memset_func_en(bp, p->func_id, 1);
2322 if (p->func_flgs & FUNC_FLG_STATS) {
2323 struct stats_indication_flags stats_flags = {0};
2324 stats_flags.collect_eth = 1;
2326 storm_memset_xstats_flags(bp, &stats_flags, p->func_id);
2327 storm_memset_xstats_addr(bp, p->fw_stat_map, p->func_id);
2329 storm_memset_tstats_flags(bp, &stats_flags, p->func_id);
2330 storm_memset_tstats_addr(bp, p->fw_stat_map, p->func_id);
2332 storm_memset_ustats_flags(bp, &stats_flags, p->func_id);
2333 storm_memset_ustats_addr(bp, p->fw_stat_map, p->func_id);
2335 storm_memset_cstats_flags(bp, &stats_flags, p->func_id);
2336 storm_memset_cstats_addr(bp, p->fw_stat_map, p->func_id);
2340 if (p->func_flgs & FUNC_FLG_SPQ) {
2341 storm_memset_spq_addr(bp, p->spq_map, p->func_id);
2342 REG_WR(bp, XSEM_REG_FAST_MEMORY +
2343 XSTORM_SPQ_PROD_OFFSET(p->func_id), p->spq_prod);
2347 static inline u16 bnx2x_get_cl_flags(struct bnx2x *bp,
2348 struct bnx2x_fastpath *fp)
2352 /* calculate queue flags */
2353 flags |= QUEUE_FLG_CACHE_ALIGN;
2354 flags |= QUEUE_FLG_HC;
2355 flags |= IS_MF(bp) ? QUEUE_FLG_OV : 0;
2358 flags |= QUEUE_FLG_VLAN;
2359 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
2362 if (!fp->disable_tpa)
2363 flags |= QUEUE_FLG_TPA;
2365 flags |= QUEUE_FLG_STATS;
2370 static void bnx2x_pf_rx_cl_prep(struct bnx2x *bp,
2371 struct bnx2x_fastpath *fp, struct rxq_pause_params *pause,
2372 struct bnx2x_rxq_init_params *rxq_init)
2376 u16 tpa_agg_size = 0;
2378 /* calculate queue flags */
2379 u16 flags = bnx2x_get_cl_flags(bp, fp);
2381 if (!fp->disable_tpa) {
2382 pause->sge_th_hi = 250;
2383 pause->sge_th_lo = 150;
2384 tpa_agg_size = min_t(u32,
2385 (min_t(u32, 8, MAX_SKB_FRAGS) *
2386 SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
2387 max_sge = SGE_PAGE_ALIGN(bp->dev->mtu) >>
2389 max_sge = ((max_sge + PAGES_PER_SGE - 1) &
2390 (~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT;
2391 sge_sz = (u16)min_t(u32, SGE_PAGE_SIZE * PAGES_PER_SGE,
2395 /* pause - not for e1 */
2396 if (!CHIP_IS_E1(bp)) {
2397 pause->bd_th_hi = 350;
2398 pause->bd_th_lo = 250;
2399 pause->rcq_th_hi = 350;
2400 pause->rcq_th_lo = 250;
2401 pause->sge_th_hi = 0;
2402 pause->sge_th_lo = 0;
2407 rxq_init->flags = flags;
2408 rxq_init->cxt = &bp->context.vcxt[fp->cid].eth;
2409 rxq_init->dscr_map = fp->rx_desc_mapping;
2410 rxq_init->sge_map = fp->rx_sge_mapping;
2411 rxq_init->rcq_map = fp->rx_comp_mapping;
2412 rxq_init->rcq_np_map = fp->rx_comp_mapping + BCM_PAGE_SIZE;
2413 rxq_init->mtu = bp->dev->mtu;
2414 rxq_init->buf_sz = bp->rx_buf_size;
2415 rxq_init->cl_qzone_id = fp->cl_qzone_id;
2416 rxq_init->cl_id = fp->cl_id;
2417 rxq_init->spcl_id = fp->cl_id;
2418 rxq_init->stat_id = fp->cl_id;
2419 rxq_init->tpa_agg_sz = tpa_agg_size;
2420 rxq_init->sge_buf_sz = sge_sz;
2421 rxq_init->max_sges_pkt = max_sge;
2422 rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT;
2423 rxq_init->fw_sb_id = fp->fw_sb_id;
2425 rxq_init->sb_cq_index = U_SB_ETH_RX_CQ_INDEX;
2427 rxq_init->cid = HW_CID(bp, fp->cid);
2429 rxq_init->hc_rate = bp->rx_ticks ? (1000000 / bp->rx_ticks) : 0;
2432 static void bnx2x_pf_tx_cl_prep(struct bnx2x *bp,
2433 struct bnx2x_fastpath *fp, struct bnx2x_txq_init_params *txq_init)
2435 u16 flags = bnx2x_get_cl_flags(bp, fp);
2437 txq_init->flags = flags;
2438 txq_init->cxt = &bp->context.vcxt[fp->cid].eth;
2439 txq_init->dscr_map = fp->tx_desc_mapping;
2440 txq_init->stat_id = fp->cl_id;
2441 txq_init->cid = HW_CID(bp, fp->cid);
2442 txq_init->sb_cq_index = C_SB_ETH_TX_CQ_INDEX;
2443 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
2444 txq_init->fw_sb_id = fp->fw_sb_id;
2445 txq_init->hc_rate = bp->tx_ticks ? (1000000 / bp->tx_ticks) : 0;
2448 void bnx2x_pf_init(struct bnx2x *bp)
2450 struct bnx2x_func_init_params func_init = {0};
2451 struct bnx2x_rss_params rss = {0};
2452 struct event_ring_data eq_data = { {0} };
2455 /* pf specific setups */
2456 if (!CHIP_IS_E1(bp))
2457 storm_memset_ov(bp, bp->mf_ov, BP_FUNC(bp));
2459 if (CHIP_IS_E2(bp)) {
2460 /* reset IGU PF statistics: MSIX + ATTN */
2462 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
2463 BNX2X_IGU_STAS_MSG_VF_CNT*4 +
2464 (CHIP_MODE_IS_4_PORT(bp) ?
2465 BP_FUNC(bp) : BP_VN(bp))*4, 0);
2467 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
2468 BNX2X_IGU_STAS_MSG_VF_CNT*4 +
2469 BNX2X_IGU_STAS_MSG_PF_CNT*4 +
2470 (CHIP_MODE_IS_4_PORT(bp) ?
2471 BP_FUNC(bp) : BP_VN(bp))*4, 0);
2474 /* function setup flags */
2475 flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ);
2477 if (CHIP_IS_E1x(bp))
2478 flags |= (bp->flags & TPA_ENABLE_FLAG) ? FUNC_FLG_TPA : 0;
2480 flags |= FUNC_FLG_TPA;
2483 * Although RSS is meaningless when there is a single HW queue we
2484 * still need it enabled in order to have HW Rx hash generated.
2486 * if (is_eth_multi(bp))
2487 * flags |= FUNC_FLG_RSS;
2489 flags |= FUNC_FLG_RSS;
2491 /* function setup */
2492 if (flags & FUNC_FLG_RSS) {
2493 rss.cap = (RSS_IPV4_CAP | RSS_IPV4_TCP_CAP |
2494 RSS_IPV6_CAP | RSS_IPV6_TCP_CAP);
2495 rss.mode = bp->multi_mode;
2496 rss.result_mask = MULTI_MASK;
2497 func_init.rss = &rss;
2500 func_init.func_flgs = flags;
2501 func_init.pf_id = BP_FUNC(bp);
2502 func_init.func_id = BP_FUNC(bp);
2503 func_init.fw_stat_map = bnx2x_sp_mapping(bp, fw_stats);
2504 func_init.spq_map = bp->spq_mapping;
2505 func_init.spq_prod = bp->spq_prod_idx;
2507 bnx2x_func_init(bp, &func_init);
2509 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
2512 Congestion management values depend on the link rate
2513 There is no active link so initial link rate is set to 10 Gbps.
2514 When the link comes up The congestion management values are
2515 re-calculated according to the actual link rate.
2517 bp->link_vars.line_speed = SPEED_10000;
2518 bnx2x_cmng_fns_init(bp, true, bnx2x_get_cmng_fns_mode(bp));
2520 /* Only the PMF sets the HW */
2522 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
2524 /* no rx until link is up */
2525 bp->rx_mode = BNX2X_RX_MODE_NONE;
2526 bnx2x_set_storm_rx_mode(bp);
2528 /* init Event Queue */
2529 eq_data.base_addr.hi = U64_HI(bp->eq_mapping);
2530 eq_data.base_addr.lo = U64_LO(bp->eq_mapping);
2531 eq_data.producer = bp->eq_prod;
2532 eq_data.index_id = HC_SP_INDEX_EQ_CONS;
2533 eq_data.sb_id = DEF_SB_ID;
2534 storm_memset_eq_data(bp, &eq_data, BP_FUNC(bp));
2538 static void bnx2x_e1h_disable(struct bnx2x *bp)
2540 int port = BP_PORT(bp);
2542 netif_tx_disable(bp->dev);
2544 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
2546 netif_carrier_off(bp->dev);
2549 static void bnx2x_e1h_enable(struct bnx2x *bp)
2551 int port = BP_PORT(bp);
2553 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
2555 /* Tx queue should be only reenabled */
2556 netif_tx_wake_all_queues(bp->dev);
2559 * Should not call netif_carrier_on since it will be called if the link
2560 * is up when checking for link state
2564 static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
2566 DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
2568 if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
2571 * This is the only place besides the function initialization
2572 * where the bp->flags can change so it is done without any
2575 if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) {
2576 DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
2577 bp->flags |= MF_FUNC_DIS;
2579 bnx2x_e1h_disable(bp);
2581 DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
2582 bp->flags &= ~MF_FUNC_DIS;
2584 bnx2x_e1h_enable(bp);
2586 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
2588 if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
2590 bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX);
2591 bnx2x_link_sync_notify(bp);
2592 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
2593 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
2596 /* Report results to MCP */
2598 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE, 0);
2600 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK, 0);
2603 /* must be called under the spq lock */
2604 static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
2606 struct eth_spe *next_spe = bp->spq_prod_bd;
2608 if (bp->spq_prod_bd == bp->spq_last_bd) {
2609 bp->spq_prod_bd = bp->spq;
2610 bp->spq_prod_idx = 0;
2611 DP(NETIF_MSG_TIMER, "end of spq\n");
2619 /* must be called under the spq lock */
2620 static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
2622 int func = BP_FUNC(bp);
2624 /* Make sure that BD data is updated before writing the producer */
2627 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2632 /* the slow path queue is odd since completions arrive on the fastpath ring */
2633 int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2634 u32 data_hi, u32 data_lo, int common)
2636 struct eth_spe *spe;
2639 #ifdef BNX2X_STOP_ON_ERROR
2640 if (unlikely(bp->panic))
2644 spin_lock_bh(&bp->spq_lock);
2646 if (!atomic_read(&bp->spq_left)) {
2647 BNX2X_ERR("BUG! SPQ ring full!\n");
2648 spin_unlock_bh(&bp->spq_lock);
2653 spe = bnx2x_sp_get_next(bp);
2655 /* CID needs port number to be encoded int it */
2656 spe->hdr.conn_and_cmd_data =
2657 cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
2662 * FUNC_START, FUNC_STOP, CFC_DEL, STATS, SET_MAC
2663 * TRAFFIC_STOP, TRAFFIC_START
2665 type = (NONE_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
2666 & SPE_HDR_CONN_TYPE;
2668 /* ETH ramrods: SETUP, HALT */
2669 type = (ETH_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
2670 & SPE_HDR_CONN_TYPE;
2672 type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) &
2673 SPE_HDR_FUNCTION_ID);
2675 spe->hdr.type = cpu_to_le16(type);
2677 spe->data.update_data_addr.hi = cpu_to_le32(data_hi);
2678 spe->data.update_data_addr.lo = cpu_to_le32(data_lo);
2680 /* stats ramrod has it's own slot on the spq */
2681 if (command != RAMROD_CMD_ID_COMMON_STAT_QUERY)
2682 /* It's ok if the actual decrement is issued towards the memory
2683 * somewhere between the spin_lock and spin_unlock. Thus no
2684 * more explict memory barrier is needed.
2686 atomic_dec(&bp->spq_left);
2688 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2689 "SPQE[%x] (%x:%x) command %d hw_cid %x data (%x:%x) "
2690 "type(0x%x) left %x\n",
2691 bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
2692 (u32)(U64_LO(bp->spq_mapping) +
2693 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2694 HW_CID(bp, cid), data_hi, data_lo, type, atomic_read(&bp->spq_left));
2696 bnx2x_sp_prod_update(bp);
2697 spin_unlock_bh(&bp->spq_lock);
2701 /* acquire split MCP access lock register */
2702 static int bnx2x_acquire_alr(struct bnx2x *bp)
2708 for (j = 0; j < 1000; j++) {
2710 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2711 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2712 if (val & (1L << 31))
2717 if (!(val & (1L << 31))) {
2718 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2725 /* release split MCP access lock register */
2726 static void bnx2x_release_alr(struct bnx2x *bp)
2728 REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
2731 #define BNX2X_DEF_SB_ATT_IDX 0x0001
2732 #define BNX2X_DEF_SB_IDX 0x0002
2734 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2736 struct host_sp_status_block *def_sb = bp->def_status_blk;
2739 barrier(); /* status block is written to by the chip */
2740 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2741 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2742 rc |= BNX2X_DEF_SB_ATT_IDX;
2745 if (bp->def_idx != def_sb->sp_sb.running_index) {
2746 bp->def_idx = def_sb->sp_sb.running_index;
2747 rc |= BNX2X_DEF_SB_IDX;
2750 /* Do not reorder: indecies reading should complete before handling */
2756 * slow path service functions
2759 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2761 int port = BP_PORT(bp);
2762 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2763 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2764 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2765 NIG_REG_MASK_INTERRUPT_PORT0;
2770 if (bp->attn_state & asserted)
2771 BNX2X_ERR("IGU ERROR\n");
2773 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2774 aeu_mask = REG_RD(bp, aeu_addr);
2776 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
2777 aeu_mask, asserted);
2778 aeu_mask &= ~(asserted & 0x3ff);
2779 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2781 REG_WR(bp, aeu_addr, aeu_mask);
2782 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2784 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2785 bp->attn_state |= asserted;
2786 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2788 if (asserted & ATTN_HARD_WIRED_MASK) {
2789 if (asserted & ATTN_NIG_FOR_FUNC) {
2791 bnx2x_acquire_phy_lock(bp);
2793 /* save nig interrupt mask */
2794 nig_mask = REG_RD(bp, nig_int_mask_addr);
2795 REG_WR(bp, nig_int_mask_addr, 0);
2797 bnx2x_link_attn(bp);
2799 /* handle unicore attn? */
2801 if (asserted & ATTN_SW_TIMER_4_FUNC)
2802 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2804 if (asserted & GPIO_2_FUNC)
2805 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2807 if (asserted & GPIO_3_FUNC)
2808 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2810 if (asserted & GPIO_4_FUNC)
2811 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2814 if (asserted & ATTN_GENERAL_ATTN_1) {
2815 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2816 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2818 if (asserted & ATTN_GENERAL_ATTN_2) {
2819 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2820 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2822 if (asserted & ATTN_GENERAL_ATTN_3) {
2823 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2824 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2827 if (asserted & ATTN_GENERAL_ATTN_4) {
2828 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2829 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2831 if (asserted & ATTN_GENERAL_ATTN_5) {
2832 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2833 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2835 if (asserted & ATTN_GENERAL_ATTN_6) {
2836 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2837 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2841 } /* if hardwired */
2843 if (bp->common.int_block == INT_BLOCK_HC)
2844 reg_addr = (HC_REG_COMMAND_REG + port*32 +
2845 COMMAND_REG_ATTN_BITS_SET);
2847 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8);
2849 DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", asserted,
2850 (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
2851 REG_WR(bp, reg_addr, asserted);
2853 /* now set back the mask */
2854 if (asserted & ATTN_NIG_FOR_FUNC) {
2855 REG_WR(bp, nig_int_mask_addr, nig_mask);
2856 bnx2x_release_phy_lock(bp);
2860 static inline void bnx2x_fan_failure(struct bnx2x *bp)
2862 int port = BP_PORT(bp);
2864 /* mark the failure */
2867 dev_info.port_hw_config[port].external_phy_config);
2869 ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2870 ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2871 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
2874 /* log the failure */
2875 netdev_err(bp->dev, "Fan Failure on Network Controller has caused"
2876 " the driver to shutdown the card to prevent permanent"
2877 " damage. Please contact OEM Support for assistance\n");
2880 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2882 int port = BP_PORT(bp);
2886 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2887 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2889 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2891 val = REG_RD(bp, reg_offset);
2892 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2893 REG_WR(bp, reg_offset, val);
2895 BNX2X_ERR("SPIO5 hw attention\n");
2897 /* Fan failure attention */
2898 bnx2x_hw_reset_phy(&bp->link_params);
2899 bnx2x_fan_failure(bp);
2902 if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2903 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2904 bnx2x_acquire_phy_lock(bp);
2905 bnx2x_handle_module_detect_int(&bp->link_params);
2906 bnx2x_release_phy_lock(bp);
2909 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2911 val = REG_RD(bp, reg_offset);
2912 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2913 REG_WR(bp, reg_offset, val);
2915 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2916 (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
2921 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2925 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
2927 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2928 BNX2X_ERR("DB hw attention 0x%x\n", val);
2929 /* DORQ discard attention */
2931 BNX2X_ERR("FATAL error from DORQ\n");
2934 if (attn & HW_INTERRUT_ASSERT_SET_1) {
2936 int port = BP_PORT(bp);
2939 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2940 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2942 val = REG_RD(bp, reg_offset);
2943 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2944 REG_WR(bp, reg_offset, val);
2946 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2947 (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
2952 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2956 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2958 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2959 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2960 /* CFC error attention */
2962 BNX2X_ERR("FATAL error from CFC\n");
2965 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2967 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2968 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2969 /* RQ_USDMDP_FIFO_OVERFLOW */
2971 BNX2X_ERR("FATAL error from PXP\n");
2972 if (CHIP_IS_E2(bp)) {
2973 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_1);
2974 BNX2X_ERR("PXP hw attention-1 0x%x\n", val);
2978 if (attn & HW_INTERRUT_ASSERT_SET_2) {
2980 int port = BP_PORT(bp);
2983 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2984 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2986 val = REG_RD(bp, reg_offset);
2987 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2988 REG_WR(bp, reg_offset, val);
2990 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2991 (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
2996 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
3000 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
3002 if (attn & BNX2X_PMF_LINK_ASSERT) {
3003 int func = BP_FUNC(bp);
3005 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
3006 bp->mf_config[BP_VN(bp)] = MF_CFG_RD(bp,
3007 func_mf_config[BP_ABS_FUNC(bp)].config);
3009 func_mb[BP_FW_MB_IDX(bp)].drv_status);
3010 if (val & DRV_STATUS_DCC_EVENT_MASK)
3012 (val & DRV_STATUS_DCC_EVENT_MASK));
3013 bnx2x__link_status_update(bp);
3014 if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
3015 bnx2x_pmf_update(bp);
3017 } else if (attn & BNX2X_MC_ASSERT_BITS) {
3019 BNX2X_ERR("MC assert!\n");
3020 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
3021 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
3022 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
3023 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
3026 } else if (attn & BNX2X_MCP_ASSERT) {
3028 BNX2X_ERR("MCP assert!\n");
3029 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
3033 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
3036 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
3037 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
3038 if (attn & BNX2X_GRC_TIMEOUT) {
3039 val = CHIP_IS_E1(bp) ? 0 :
3040 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN);
3041 BNX2X_ERR("GRC time-out 0x%08x\n", val);
3043 if (attn & BNX2X_GRC_RSV) {
3044 val = CHIP_IS_E1(bp) ? 0 :
3045 REG_RD(bp, MISC_REG_GRC_RSV_ATTN);
3046 BNX2X_ERR("GRC reserved 0x%08x\n", val);
3048 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
3052 #define BNX2X_MISC_GEN_REG MISC_REG_GENERIC_POR_1
3053 #define LOAD_COUNTER_BITS 16 /* Number of bits for load counter */
3054 #define LOAD_COUNTER_MASK (((u32)0x1 << LOAD_COUNTER_BITS) - 1)
3055 #define RESET_DONE_FLAG_MASK (~LOAD_COUNTER_MASK)
3056 #define RESET_DONE_FLAG_SHIFT LOAD_COUNTER_BITS
3057 #define CHIP_PARITY_SUPPORTED(bp) (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp))
3060 * should be run under rtnl lock
3062 static inline void bnx2x_set_reset_done(struct bnx2x *bp)
3064 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3065 val &= ~(1 << RESET_DONE_FLAG_SHIFT);
3066 REG_WR(bp, BNX2X_MISC_GEN_REG, val);
3072 * should be run under rtnl lock
3074 static inline void bnx2x_set_reset_in_progress(struct bnx2x *bp)
3076 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3078 REG_WR(bp, BNX2X_MISC_GEN_REG, val);
3084 * should be run under rtnl lock
3086 bool bnx2x_reset_is_done(struct bnx2x *bp)
3088 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3089 DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
3090 return (val & RESET_DONE_FLAG_MASK) ? false : true;
3094 * should be run under rtnl lock
3096 inline void bnx2x_inc_load_cnt(struct bnx2x *bp)
3098 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3100 DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
3102 val1 = ((val & LOAD_COUNTER_MASK) + 1) & LOAD_COUNTER_MASK;
3103 REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
3109 * should be run under rtnl lock
3111 u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
3113 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3115 DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
3117 val1 = ((val & LOAD_COUNTER_MASK) - 1) & LOAD_COUNTER_MASK;
3118 REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
3126 * should be run under rtnl lock
3128 static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp)
3130 return REG_RD(bp, BNX2X_MISC_GEN_REG) & LOAD_COUNTER_MASK;
3133 static inline void bnx2x_clear_load_cnt(struct bnx2x *bp)
3135 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3136 REG_WR(bp, BNX2X_MISC_GEN_REG, val & (~LOAD_COUNTER_MASK));
3139 static inline void _print_next_block(int idx, const char *blk)
3146 static inline int bnx2x_print_blocks_with_parity0(u32 sig, int par_num)
3150 for (i = 0; sig; i++) {
3151 cur_bit = ((u32)0x1 << i);
3152 if (sig & cur_bit) {
3154 case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
3155 _print_next_block(par_num++, "BRB");
3157 case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
3158 _print_next_block(par_num++, "PARSER");
3160 case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
3161 _print_next_block(par_num++, "TSDM");
3163 case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
3164 _print_next_block(par_num++, "SEARCHER");
3166 case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
3167 _print_next_block(par_num++, "TSEMI");
3179 static inline int bnx2x_print_blocks_with_parity1(u32 sig, int par_num)
3183 for (i = 0; sig; i++) {
3184 cur_bit = ((u32)0x1 << i);
3185 if (sig & cur_bit) {
3187 case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
3188 _print_next_block(par_num++, "PBCLIENT");
3190 case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
3191 _print_next_block(par_num++, "QM");
3193 case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
3194 _print_next_block(par_num++, "XSDM");
3196 case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
3197 _print_next_block(par_num++, "XSEMI");
3199 case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
3200 _print_next_block(par_num++, "DOORBELLQ");
3202 case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
3203 _print_next_block(par_num++, "VAUX PCI CORE");
3205 case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
3206 _print_next_block(par_num++, "DEBUG");
3208 case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
3209 _print_next_block(par_num++, "USDM");
3211 case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
3212 _print_next_block(par_num++, "USEMI");
3214 case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
3215 _print_next_block(par_num++, "UPB");
3217 case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
3218 _print_next_block(par_num++, "CSDM");
3230 static inline int bnx2x_print_blocks_with_parity2(u32 sig, int par_num)
3234 for (i = 0; sig; i++) {
3235 cur_bit = ((u32)0x1 << i);
3236 if (sig & cur_bit) {
3238 case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
3239 _print_next_block(par_num++, "CSEMI");
3241 case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
3242 _print_next_block(par_num++, "PXP");
3244 case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
3245 _print_next_block(par_num++,
3246 "PXPPCICLOCKCLIENT");
3248 case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
3249 _print_next_block(par_num++, "CFC");
3251 case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
3252 _print_next_block(par_num++, "CDU");
3254 case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
3255 _print_next_block(par_num++, "IGU");
3257 case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
3258 _print_next_block(par_num++, "MISC");
3270 static inline int bnx2x_print_blocks_with_parity3(u32 sig, int par_num)
3274 for (i = 0; sig; i++) {
3275 cur_bit = ((u32)0x1 << i);
3276 if (sig & cur_bit) {
3278 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
3279 _print_next_block(par_num++, "MCP ROM");
3281 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
3282 _print_next_block(par_num++, "MCP UMP RX");
3284 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
3285 _print_next_block(par_num++, "MCP UMP TX");
3287 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
3288 _print_next_block(par_num++, "MCP SCPAD");
3300 static inline bool bnx2x_parity_attn(struct bnx2x *bp, u32 sig0, u32 sig1,
3303 if ((sig0 & HW_PRTY_ASSERT_SET_0) || (sig1 & HW_PRTY_ASSERT_SET_1) ||
3304 (sig2 & HW_PRTY_ASSERT_SET_2) || (sig3 & HW_PRTY_ASSERT_SET_3)) {
3306 DP(NETIF_MSG_HW, "Was parity error: HW block parity attention: "
3307 "[0]:0x%08x [1]:0x%08x "
3308 "[2]:0x%08x [3]:0x%08x\n",
3309 sig0 & HW_PRTY_ASSERT_SET_0,
3310 sig1 & HW_PRTY_ASSERT_SET_1,
3311 sig2 & HW_PRTY_ASSERT_SET_2,
3312 sig3 & HW_PRTY_ASSERT_SET_3);
3313 printk(KERN_ERR"%s: Parity errors detected in blocks: ",
3315 par_num = bnx2x_print_blocks_with_parity0(
3316 sig0 & HW_PRTY_ASSERT_SET_0, par_num);
3317 par_num = bnx2x_print_blocks_with_parity1(
3318 sig1 & HW_PRTY_ASSERT_SET_1, par_num);
3319 par_num = bnx2x_print_blocks_with_parity2(
3320 sig2 & HW_PRTY_ASSERT_SET_2, par_num);
3321 par_num = bnx2x_print_blocks_with_parity3(
3322 sig3 & HW_PRTY_ASSERT_SET_3, par_num);
3329 bool bnx2x_chk_parity_attn(struct bnx2x *bp)
3331 struct attn_route attn;
3332 int port = BP_PORT(bp);
3334 attn.sig[0] = REG_RD(bp,
3335 MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
3337 attn.sig[1] = REG_RD(bp,
3338 MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
3340 attn.sig[2] = REG_RD(bp,
3341 MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
3343 attn.sig[3] = REG_RD(bp,
3344 MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
3347 return bnx2x_parity_attn(bp, attn.sig[0], attn.sig[1], attn.sig[2],
3352 static inline void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn)
3355 if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) {
3357 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS_CLR);
3358 BNX2X_ERR("PGLUE hw attention 0x%x\n", val);
3359 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR)
3360 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3362 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR)
3363 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3364 "INCORRECT_RCV_BEHAVIOR\n");
3365 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN)
3366 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3367 "WAS_ERROR_ATTN\n");
3368 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN)
3369 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3370 "VF_LENGTH_VIOLATION_ATTN\n");
3372 PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN)
3373 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3374 "VF_GRC_SPACE_VIOLATION_ATTN\n");
3376 PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN)
3377 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3378 "VF_MSIX_BAR_VIOLATION_ATTN\n");
3379 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN)
3380 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3381 "TCPL_ERROR_ATTN\n");
3382 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN)
3383 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3384 "TCPL_IN_TWO_RCBS_ATTN\n");
3385 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW)
3386 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3387 "CSSNOOP_FIFO_OVERFLOW\n");
3389 if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) {
3390 val = REG_RD(bp, ATC_REG_ATC_INT_STS_CLR);
3391 BNX2X_ERR("ATC hw attention 0x%x\n", val);
3392 if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR)
3393 BNX2X_ERR("ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n");
3394 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND)
3395 BNX2X_ERR("ATC_ATC_INT_STS_REG"
3396 "_ATC_TCPL_TO_NOT_PEND\n");
3397 if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS)
3398 BNX2X_ERR("ATC_ATC_INT_STS_REG_"
3399 "ATC_GPA_MULTIPLE_HITS\n");
3400 if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT)
3401 BNX2X_ERR("ATC_ATC_INT_STS_REG_"
3402 "ATC_RCPL_TO_EMPTY_CNT\n");
3403 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR)
3404 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n");
3405 if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU)
3406 BNX2X_ERR("ATC_ATC_INT_STS_REG_"
3407 "ATC_IREQ_LESS_THAN_STU\n");
3410 if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
3411 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) {
3412 BNX2X_ERR("FATAL parity attention set4 0x%x\n",
3413 (u32)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
3414 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)));
3419 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3421 struct attn_route attn, *group_mask;
3422 int port = BP_PORT(bp);
3428 /* need to take HW lock because MCP or other port might also
3429 try to handle this event */
3430 bnx2x_acquire_alr(bp);
3432 if (bnx2x_chk_parity_attn(bp)) {
3433 bp->recovery_state = BNX2X_RECOVERY_INIT;
3434 bnx2x_set_reset_in_progress(bp);
3435 schedule_delayed_work(&bp->reset_task, 0);
3436 /* Disable HW interrupts */
3437 bnx2x_int_disable(bp);
3438 bnx2x_release_alr(bp);
3439 /* In case of parity errors don't handle attentions so that
3440 * other function would "see" parity errors.
3445 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
3446 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
3447 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
3448 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
3451 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
3455 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x %08x\n",
3456 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]);
3458 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
3459 if (deasserted & (1 << index)) {
3460 group_mask = &bp->attn_group[index];
3462 DP(NETIF_MSG_HW, "group[%d]: %08x %08x "
3465 group_mask->sig[0], group_mask->sig[1],
3466 group_mask->sig[2], group_mask->sig[3],
3467 group_mask->sig[4]);
3469 bnx2x_attn_int_deasserted4(bp,
3470 attn.sig[4] & group_mask->sig[4]);
3471 bnx2x_attn_int_deasserted3(bp,
3472 attn.sig[3] & group_mask->sig[3]);
3473 bnx2x_attn_int_deasserted1(bp,
3474 attn.sig[1] & group_mask->sig[1]);
3475 bnx2x_attn_int_deasserted2(bp,
3476 attn.sig[2] & group_mask->sig[2]);
3477 bnx2x_attn_int_deasserted0(bp,
3478 attn.sig[0] & group_mask->sig[0]);
3482 bnx2x_release_alr(bp);
3484 if (bp->common.int_block == INT_BLOCK_HC)
3485 reg_addr = (HC_REG_COMMAND_REG + port*32 +
3486 COMMAND_REG_ATTN_BITS_CLR);
3488 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8);
3491 DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", val,
3492 (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
3493 REG_WR(bp, reg_addr, val);
3495 if (~bp->attn_state & deasserted)
3496 BNX2X_ERR("IGU ERROR\n");
3498 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3499 MISC_REG_AEU_MASK_ATTN_FUNC_0;
3501 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3502 aeu_mask = REG_RD(bp, reg_addr);
3504 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
3505 aeu_mask, deasserted);
3506 aeu_mask |= (deasserted & 0x3ff);
3507 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
3509 REG_WR(bp, reg_addr, aeu_mask);
3510 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3512 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
3513 bp->attn_state &= ~deasserted;
3514 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
3517 static void bnx2x_attn_int(struct bnx2x *bp)
3519 /* read local copy of bits */
3520 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
3522 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
3524 u32 attn_state = bp->attn_state;
3526 /* look for changed bits */
3527 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
3528 u32 deasserted = ~attn_bits & attn_ack & attn_state;
3531 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
3532 attn_bits, attn_ack, asserted, deasserted);
3534 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
3535 BNX2X_ERR("BAD attention state\n");
3537 /* handle bits that were raised */
3539 bnx2x_attn_int_asserted(bp, asserted);
3542 bnx2x_attn_int_deasserted(bp, deasserted);
3545 static inline void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod)
3547 /* No memory barriers */
3548 storm_memset_eq_prod(bp, prod, BP_FUNC(bp));
3549 mmiowb(); /* keep prod updates ordered */
3553 static int bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid,
3554 union event_ring_elem *elem)
3556 if (!bp->cnic_eth_dev.starting_cid ||
3557 cid < bp->cnic_eth_dev.starting_cid)
3560 DP(BNX2X_MSG_SP, "got delete ramrod for CNIC CID %d\n", cid);
3562 if (unlikely(elem->message.data.cfc_del_event.error)) {
3563 BNX2X_ERR("got delete ramrod for CNIC CID %d with error!\n",
3565 bnx2x_panic_dump(bp);
3567 bnx2x_cnic_cfc_comp(bp, cid);
3572 static void bnx2x_eq_int(struct bnx2x *bp)
3574 u16 hw_cons, sw_cons, sw_prod;
3575 union event_ring_elem *elem;
3580 hw_cons = le16_to_cpu(*bp->eq_cons_sb);
3582 /* The hw_cos range is 1-255, 257 - the sw_cons range is 0-254, 256.
3583 * when we get the the next-page we nned to adjust so the loop
3584 * condition below will be met. The next element is the size of a
3585 * regular element and hence incrementing by 1
3587 if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE)
3590 /* This function may never run in parralel with itself for a
3591 * specific bp, thus there is no need in "paired" read memory
3594 sw_cons = bp->eq_cons;
3595 sw_prod = bp->eq_prod;
3597 DP(BNX2X_MSG_SP, "EQ: hw_cons %u sw_cons %u bp->spq_left %u\n",
3598 hw_cons, sw_cons, atomic_read(&bp->spq_left));
3600 for (; sw_cons != hw_cons;
3601 sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {
3604 elem = &bp->eq_ring[EQ_DESC(sw_cons)];
3606 cid = SW_CID(elem->message.data.cfc_del_event.cid);
3607 opcode = elem->message.opcode;
3610 /* handle eq element */
3612 case EVENT_RING_OPCODE_STAT_QUERY:
3613 DP(NETIF_MSG_TIMER, "got statistics comp event\n");
3614 /* nothing to do with stats comp */
3617 case EVENT_RING_OPCODE_CFC_DEL:
3618 /* handle according to cid range */
3620 * we may want to verify here that the bp state is
3623 DP(NETIF_MSG_IFDOWN,
3624 "got delete ramrod for MULTI[%d]\n", cid);
3626 if (!bnx2x_cnic_handle_cfc_del(bp, cid, elem))
3629 bnx2x_fp(bp, cid, state) =
3630 BNX2X_FP_STATE_CLOSED;
3635 switch (opcode | bp->state) {
3636 case (EVENT_RING_OPCODE_FUNCTION_START |
3637 BNX2X_STATE_OPENING_WAIT4_PORT):
3638 DP(NETIF_MSG_IFUP, "got setup ramrod\n");
3639 bp->state = BNX2X_STATE_FUNC_STARTED;
3642 case (EVENT_RING_OPCODE_FUNCTION_STOP |
3643 BNX2X_STATE_CLOSING_WAIT4_HALT):
3644 DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
3645 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
3648 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN):
3649 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG):
3650 DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
3651 bp->set_mac_pending = 0;
3654 case (EVENT_RING_OPCODE_SET_MAC |
3655 BNX2X_STATE_CLOSING_WAIT4_HALT):
3656 DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
3657 bp->set_mac_pending = 0;
3660 /* unknown event log error and continue */
3661 BNX2X_ERR("Unknown EQ event %d\n",
3662 elem->message.opcode);
3668 smp_mb__before_atomic_inc();
3669 atomic_add(spqe_cnt, &bp->spq_left);
3671 bp->eq_cons = sw_cons;
3672 bp->eq_prod = sw_prod;
3673 /* Make sure that above mem writes were issued towards the memory */
3676 /* update producer */
3677 bnx2x_update_eq_prod(bp, bp->eq_prod);
3680 static void bnx2x_sp_task(struct work_struct *work)
3682 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
3685 /* Return here if interrupt is disabled */
3686 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3687 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3691 status = bnx2x_update_dsb_idx(bp);
3692 /* if (status == 0) */
3693 /* BNX2X_ERR("spurious slowpath interrupt!\n"); */
3695 DP(NETIF_MSG_INTR, "got a slowpath interrupt (status 0x%x)\n", status);
3698 if (status & BNX2X_DEF_SB_ATT_IDX) {
3700 status &= ~BNX2X_DEF_SB_ATT_IDX;
3703 /* SP events: STAT_QUERY and others */
3704 if (status & BNX2X_DEF_SB_IDX) {
3706 /* Handle EQ completions */
3709 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID,
3710 le16_to_cpu(bp->def_idx), IGU_INT_NOP, 1);
3712 status &= ~BNX2X_DEF_SB_IDX;
3715 if (unlikely(status))
3716 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
3719 bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID,
3720 le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1);
3723 irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
3725 struct net_device *dev = dev_instance;
3726 struct bnx2x *bp = netdev_priv(dev);
3728 /* Return here if interrupt is disabled */
3729 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3730 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3734 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0,
3735 IGU_INT_DISABLE, 0);
3737 #ifdef BNX2X_STOP_ON_ERROR
3738 if (unlikely(bp->panic))
3744 struct cnic_ops *c_ops;
3747 c_ops = rcu_dereference(bp->cnic_ops);
3749 c_ops->cnic_handler(bp->cnic_data, NULL);
3753 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
3758 /* end of slow path */
3760 static void bnx2x_timer(unsigned long data)
3762 struct bnx2x *bp = (struct bnx2x *) data;
3764 if (!netif_running(bp->dev))
3767 if (atomic_read(&bp->intr_sem) != 0)
3771 struct bnx2x_fastpath *fp = &bp->fp[0];
3775 rc = bnx2x_rx_int(fp, 1000);
3778 if (!BP_NOMCP(bp)) {
3779 int mb_idx = BP_FW_MB_IDX(bp);
3783 ++bp->fw_drv_pulse_wr_seq;
3784 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
3785 /* TBD - add SYSTEM_TIME */
3786 drv_pulse = bp->fw_drv_pulse_wr_seq;
3787 SHMEM_WR(bp, func_mb[mb_idx].drv_pulse_mb, drv_pulse);
3789 mcp_pulse = (SHMEM_RD(bp, func_mb[mb_idx].mcp_pulse_mb) &
3790 MCP_PULSE_SEQ_MASK);
3791 /* The delta between driver pulse and mcp response
3792 * should be 1 (before mcp response) or 0 (after mcp response)
3794 if ((drv_pulse != mcp_pulse) &&
3795 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
3796 /* someone lost a heartbeat... */
3797 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
3798 drv_pulse, mcp_pulse);
3802 if (bp->state == BNX2X_STATE_OPEN)
3803 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
3806 mod_timer(&bp->timer, jiffies + bp->current_interval);
3809 /* end of Statistics */
3814 * nic init service functions
3817 static inline void bnx2x_fill(struct bnx2x *bp, u32 addr, int fill, u32 len)
3820 if (!(len%4) && !(addr%4))
3821 for (i = 0; i < len; i += 4)
3822 REG_WR(bp, addr + i, fill);
3824 for (i = 0; i < len; i++)
3825 REG_WR8(bp, addr + i, fill);
3829 /* helper: writes FP SP data to FW - data_size in dwords */
3830 static inline void bnx2x_wr_fp_sb_data(struct bnx2x *bp,
3836 for (index = 0; index < data_size; index++)
3837 REG_WR(bp, BAR_CSTRORM_INTMEM +
3838 CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
3840 *(sb_data_p + index));
3843 static inline void bnx2x_zero_fp_sb(struct bnx2x *bp, int fw_sb_id)
3847 struct hc_status_block_data_e2 sb_data_e2;
3848 struct hc_status_block_data_e1x sb_data_e1x;
3850 /* disable the function first */
3851 if (CHIP_IS_E2(bp)) {
3852 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
3853 sb_data_e2.common.p_func.pf_id = HC_FUNCTION_DISABLED;
3854 sb_data_e2.common.p_func.vf_id = HC_FUNCTION_DISABLED;
3855 sb_data_e2.common.p_func.vf_valid = false;
3856 sb_data_p = (u32 *)&sb_data_e2;
3857 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
3859 memset(&sb_data_e1x, 0,
3860 sizeof(struct hc_status_block_data_e1x));
3861 sb_data_e1x.common.p_func.pf_id = HC_FUNCTION_DISABLED;
3862 sb_data_e1x.common.p_func.vf_id = HC_FUNCTION_DISABLED;
3863 sb_data_e1x.common.p_func.vf_valid = false;
3864 sb_data_p = (u32 *)&sb_data_e1x;
3865 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
3867 bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
3869 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
3870 CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id), 0,
3871 CSTORM_STATUS_BLOCK_SIZE);
3872 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
3873 CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id), 0,
3874 CSTORM_SYNC_BLOCK_SIZE);
3877 /* helper: writes SP SB data to FW */
3878 static inline void bnx2x_wr_sp_sb_data(struct bnx2x *bp,
3879 struct hc_sp_status_block_data *sp_sb_data)
3881 int func = BP_FUNC(bp);
3883 for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
3884 REG_WR(bp, BAR_CSTRORM_INTMEM +
3885 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
3887 *((u32 *)sp_sb_data + i));
3890 static inline void bnx2x_zero_sp_sb(struct bnx2x *bp)
3892 int func = BP_FUNC(bp);
3893 struct hc_sp_status_block_data sp_sb_data;
3894 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
3896 sp_sb_data.p_func.pf_id = HC_FUNCTION_DISABLED;
3897 sp_sb_data.p_func.vf_id = HC_FUNCTION_DISABLED;
3898 sp_sb_data.p_func.vf_valid = false;
3900 bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
3902 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
3903 CSTORM_SP_STATUS_BLOCK_OFFSET(func), 0,
3904 CSTORM_SP_STATUS_BLOCK_SIZE);
3905 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
3906 CSTORM_SP_SYNC_BLOCK_OFFSET(func), 0,
3907 CSTORM_SP_SYNC_BLOCK_SIZE);
3913 void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
3914 int igu_sb_id, int igu_seg_id)
3916 hc_sm->igu_sb_id = igu_sb_id;
3917 hc_sm->igu_seg_id = igu_seg_id;
3918 hc_sm->timer_value = 0xFF;
3919 hc_sm->time_to_expire = 0xFFFFFFFF;
3922 void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
3923 u8 vf_valid, int fw_sb_id, int igu_sb_id)
3927 struct hc_status_block_data_e2 sb_data_e2;
3928 struct hc_status_block_data_e1x sb_data_e1x;
3929 struct hc_status_block_sm *hc_sm_p;
3930 struct hc_index_data *hc_index_p;
3934 if (CHIP_INT_MODE_IS_BC(bp))
3935 igu_seg_id = HC_SEG_ACCESS_NORM;
3937 igu_seg_id = IGU_SEG_ACCESS_NORM;
3939 bnx2x_zero_fp_sb(bp, fw_sb_id);
3941 if (CHIP_IS_E2(bp)) {
3942 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
3943 sb_data_e2.common.p_func.pf_id = BP_FUNC(bp);
3944 sb_data_e2.common.p_func.vf_id = vfid;
3945 sb_data_e2.common.p_func.vf_valid = vf_valid;
3946 sb_data_e2.common.p_func.vnic_id = BP_VN(bp);
3947 sb_data_e2.common.same_igu_sb_1b = true;
3948 sb_data_e2.common.host_sb_addr.hi = U64_HI(mapping);
3949 sb_data_e2.common.host_sb_addr.lo = U64_LO(mapping);
3950 hc_sm_p = sb_data_e2.common.state_machine;
3951 hc_index_p = sb_data_e2.index_data;
3952 sb_data_p = (u32 *)&sb_data_e2;
3953 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
3955 memset(&sb_data_e1x, 0,
3956 sizeof(struct hc_status_block_data_e1x));
3957 sb_data_e1x.common.p_func.pf_id = BP_FUNC(bp);
3958 sb_data_e1x.common.p_func.vf_id = 0xff;
3959 sb_data_e1x.common.p_func.vf_valid = false;
3960 sb_data_e1x.common.p_func.vnic_id = BP_VN(bp);
3961 sb_data_e1x.common.same_igu_sb_1b = true;
3962 sb_data_e1x.common.host_sb_addr.hi = U64_HI(mapping);
3963 sb_data_e1x.common.host_sb_addr.lo = U64_LO(mapping);
3964 hc_sm_p = sb_data_e1x.common.state_machine;
3965 hc_index_p = sb_data_e1x.index_data;
3966 sb_data_p = (u32 *)&sb_data_e1x;
3967 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
3970 bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID],
3971 igu_sb_id, igu_seg_id);
3972 bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID],
3973 igu_sb_id, igu_seg_id);
3975 DP(NETIF_MSG_HW, "Init FW SB %d\n", fw_sb_id);
3977 /* write indecies to HW */
3978 bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
3981 static void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u16 fw_sb_id,
3982 u8 sb_index, u8 disable, u16 usec)
3984 int port = BP_PORT(bp);
3985 u8 ticks = usec / BNX2X_BTR;
3987 storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
3989 disable = disable ? 1 : (usec ? 0 : 1);
3990 storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
3993 static void bnx2x_update_coalesce_sb(struct bnx2x *bp, u16 fw_sb_id,
3994 u16 tx_usec, u16 rx_usec)
3996 bnx2x_update_coalesce_sb_index(bp, fw_sb_id, U_SB_ETH_RX_CQ_INDEX,
3998 bnx2x_update_coalesce_sb_index(bp, fw_sb_id, C_SB_ETH_TX_CQ_INDEX,
4002 static void bnx2x_init_def_sb(struct bnx2x *bp)
4004 struct host_sp_status_block *def_sb = bp->def_status_blk;
4005 dma_addr_t mapping = bp->def_status_blk_mapping;
4006 int igu_sp_sb_index;
4008 int port = BP_PORT(bp);
4009 int func = BP_FUNC(bp);
4013 struct hc_sp_status_block_data sp_sb_data;
4014 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
4016 if (CHIP_INT_MODE_IS_BC(bp)) {
4017 igu_sp_sb_index = DEF_SB_IGU_ID;
4018 igu_seg_id = HC_SEG_ACCESS_DEF;
4020 igu_sp_sb_index = bp->igu_dsb_id;
4021 igu_seg_id = IGU_SEG_ACCESS_DEF;
4025 section = ((u64)mapping) + offsetof(struct host_sp_status_block,
4026 atten_status_block);
4027 def_sb->atten_status_block.status_block_id = igu_sp_sb_index;
4031 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4032 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4033 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4035 /* take care of sig[0]..sig[4] */
4036 for (sindex = 0; sindex < 4; sindex++)
4037 bp->attn_group[index].sig[sindex] =
4038 REG_RD(bp, reg_offset + sindex*0x4 + 0x10*index);
4042 * enable5 is separate from the rest of the registers,
4043 * and therefore the address skip is 4
4044 * and not 16 between the different groups
4046 bp->attn_group[index].sig[4] = REG_RD(bp,
4047 reg_offset + 0x10 + 0x4*index);
4049 bp->attn_group[index].sig[4] = 0;
4052 if (bp->common.int_block == INT_BLOCK_HC) {
4053 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4054 HC_REG_ATTN_MSG0_ADDR_L);
4056 REG_WR(bp, reg_offset, U64_LO(section));
4057 REG_WR(bp, reg_offset + 4, U64_HI(section));
4058 } else if (CHIP_IS_E2(bp)) {
4059 REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section));
4060 REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section));
4063 section = ((u64)mapping) + offsetof(struct host_sp_status_block,
4066 bnx2x_zero_sp_sb(bp);
4068 sp_sb_data.host_sb_addr.lo = U64_LO(section);
4069 sp_sb_data.host_sb_addr.hi = U64_HI(section);
4070 sp_sb_data.igu_sb_id = igu_sp_sb_index;
4071 sp_sb_data.igu_seg_id = igu_seg_id;
4072 sp_sb_data.p_func.pf_id = func;
4073 sp_sb_data.p_func.vnic_id = BP_VN(bp);
4074 sp_sb_data.p_func.vf_id = 0xff;
4076 bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
4078 bp->stats_pending = 0;
4079 bp->set_mac_pending = 0;
4081 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
4084 void bnx2x_update_coalesce(struct bnx2x *bp)
4088 for_each_queue(bp, i)
4089 bnx2x_update_coalesce_sb(bp, bp->fp[i].fw_sb_id,
4090 bp->rx_ticks, bp->tx_ticks);
4093 static void bnx2x_init_sp_ring(struct bnx2x *bp)
4095 spin_lock_init(&bp->spq_lock);
4096 atomic_set(&bp->spq_left, MAX_SPQ_PENDING);
4098 bp->spq_prod_idx = 0;
4099 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4100 bp->spq_prod_bd = bp->spq;
4101 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4104 static void bnx2x_init_eq_ring(struct bnx2x *bp)
4107 for (i = 1; i <= NUM_EQ_PAGES; i++) {
4108 union event_ring_elem *elem =
4109 &bp->eq_ring[EQ_DESC_CNT_PAGE * i - 1];
4111 elem->next_page.addr.hi =
4112 cpu_to_le32(U64_HI(bp->eq_mapping +
4113 BCM_PAGE_SIZE * (i % NUM_EQ_PAGES)));
4114 elem->next_page.addr.lo =
4115 cpu_to_le32(U64_LO(bp->eq_mapping +
4116 BCM_PAGE_SIZE*(i % NUM_EQ_PAGES)));
4119 bp->eq_prod = NUM_EQ_DESC;
4120 bp->eq_cons_sb = BNX2X_EQ_INDEX;
4123 static void bnx2x_init_ind_table(struct bnx2x *bp)
4125 int func = BP_FUNC(bp);
4128 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
4132 "Initializing indirection table multi_mode %d\n", bp->multi_mode);
4133 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
4134 REG_WR8(bp, BAR_TSTRORM_INTMEM +
4135 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
4136 bp->fp->cl_id + (i % bp->num_queues));
4139 void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4141 int mode = bp->rx_mode;
4144 /* All but management unicast packets should pass to the host as well */
4146 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
4147 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
4148 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
4149 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
4152 case BNX2X_RX_MODE_NONE: /* no Rx */
4153 cl_id = BP_L_ID(bp);
4154 bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_NONE);
4157 case BNX2X_RX_MODE_NORMAL:
4158 cl_id = BP_L_ID(bp);
4159 bnx2x_rxq_set_mac_filters(bp, cl_id,
4160 BNX2X_ACCEPT_UNICAST |
4161 BNX2X_ACCEPT_BROADCAST |
4162 BNX2X_ACCEPT_MULTICAST);
4165 case BNX2X_RX_MODE_ALLMULTI:
4166 cl_id = BP_L_ID(bp);
4167 bnx2x_rxq_set_mac_filters(bp, cl_id,
4168 BNX2X_ACCEPT_UNICAST |
4169 BNX2X_ACCEPT_BROADCAST |
4170 BNX2X_ACCEPT_ALL_MULTICAST);
4173 case BNX2X_RX_MODE_PROMISC:
4174 cl_id = BP_L_ID(bp);
4175 bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_PROMISCUOUS_MODE);
4177 /* pass management unicast packets as well */
4178 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
4182 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4187 BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
4188 NIG_REG_LLH0_BRB1_DRV_MASK,
4191 DP(NETIF_MSG_IFUP, "rx mode %d\n"
4192 "drop_ucast 0x%x\ndrop_mcast 0x%x\ndrop_bcast 0x%x\n"
4193 "accp_ucast 0x%x\naccp_mcast 0x%x\naccp_bcast 0x%x\n", mode,
4194 bp->mac_filters.ucast_drop_all,
4195 bp->mac_filters.mcast_drop_all,
4196 bp->mac_filters.bcast_drop_all,
4197 bp->mac_filters.ucast_accept_all,
4198 bp->mac_filters.mcast_accept_all,
4199 bp->mac_filters.bcast_accept_all
4202 storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));
4205 static void bnx2x_init_internal_common(struct bnx2x *bp)
4209 if (!CHIP_IS_E1(bp)) {
4211 /* xstorm needs to know whether to add ovlan to packets or not,
4212 * in switch-independent we'll write 0 to here... */
4213 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
4215 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
4217 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
4219 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
4223 /* Zero this manually as its initialization is
4224 currently missing in the initTool */
4225 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
4226 REG_WR(bp, BAR_USTRORM_INTMEM +
4227 USTORM_AGG_DATA_OFFSET + i * 4, 0);
4228 if (CHIP_IS_E2(bp)) {
4229 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET,
4230 CHIP_INT_MODE_IS_BC(bp) ?
4231 HC_IGU_BC_MODE : HC_IGU_NBC_MODE);
4235 static void bnx2x_init_internal_port(struct bnx2x *bp)
4240 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
4242 switch (load_code) {
4243 case FW_MSG_CODE_DRV_LOAD_COMMON:
4244 case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
4245 bnx2x_init_internal_common(bp);
4248 case FW_MSG_CODE_DRV_LOAD_PORT:
4249 bnx2x_init_internal_port(bp);
4252 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
4253 /* internal memory per function is
4254 initialized inside bnx2x_pf_init */
4258 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
4263 static void bnx2x_init_fp_sb(struct bnx2x *bp, int fp_idx)
4265 struct bnx2x_fastpath *fp = &bp->fp[fp_idx];
4267 fp->state = BNX2X_FP_STATE_CLOSED;
4269 fp->index = fp->cid = fp_idx;
4270 fp->cl_id = BP_L_ID(bp) + fp_idx;
4271 fp->fw_sb_id = bp->base_fw_ndsb + fp->cl_id + CNIC_CONTEXT_USE;
4272 fp->igu_sb_id = bp->igu_base_sb + fp_idx + CNIC_CONTEXT_USE;
4273 /* qZone id equals to FW (per path) client id */
4274 fp->cl_qzone_id = fp->cl_id +
4275 BP_PORT(bp)*(CHIP_IS_E2(bp) ? ETH_MAX_RX_CLIENTS_E2 :
4276 ETH_MAX_RX_CLIENTS_E1H);
4278 fp->ustorm_rx_prods_offset = CHIP_IS_E2(bp) ?
4279 USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id) :
4280 USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id);
4281 /* Setup SB indicies */
4282 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4283 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4285 DP(NETIF_MSG_IFUP, "queue[%d]: bnx2x_init_sb(%p,%p) "
4286 "cl_id %d fw_sb %d igu_sb %d\n",
4287 fp_idx, bp, fp->status_blk.e1x_sb, fp->cl_id, fp->fw_sb_id,
4289 bnx2x_init_sb(bp, fp->status_blk_mapping, BNX2X_VF_ID_INVALID, false,
4290 fp->fw_sb_id, fp->igu_sb_id);
4292 bnx2x_update_fpsb_idx(fp);
4295 void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
4299 for_each_queue(bp, i)
4300 bnx2x_init_fp_sb(bp, i);
4303 bnx2x_init_sb(bp, bp->cnic_sb_mapping,
4304 BNX2X_VF_ID_INVALID, false,
4305 CNIC_SB_ID(bp), CNIC_IGU_SB_ID(bp));
4309 /* ensure status block indices were read */
4312 bnx2x_init_def_sb(bp);
4313 bnx2x_update_dsb_idx(bp);
4314 bnx2x_init_rx_rings(bp);
4315 bnx2x_init_tx_rings(bp);
4316 bnx2x_init_sp_ring(bp);
4317 bnx2x_init_eq_ring(bp);
4318 bnx2x_init_internal(bp, load_code);
4320 bnx2x_init_ind_table(bp);
4321 bnx2x_stats_init(bp);
4323 /* At this point, we are ready for interrupts */
4324 atomic_set(&bp->intr_sem, 0);
4326 /* flush all before enabling interrupts */
4330 bnx2x_int_enable(bp);
4332 /* Check for SPIO5 */
4333 bnx2x_attn_int_deasserted0(bp,
4334 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
4335 AEU_INPUTS_ATTN_BITS_SPIO5);
4338 /* end of nic init */
4341 * gzip service functions
4344 static int bnx2x_gunzip_init(struct bnx2x *bp)
4346 bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
4347 &bp->gunzip_mapping, GFP_KERNEL);
4348 if (bp->gunzip_buf == NULL)
4351 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
4352 if (bp->strm == NULL)
4355 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
4357 if (bp->strm->workspace == NULL)
4367 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
4368 bp->gunzip_mapping);
4369 bp->gunzip_buf = NULL;
4372 netdev_err(bp->dev, "Cannot allocate firmware buffer for"
4373 " un-compression\n");
4377 static void bnx2x_gunzip_end(struct bnx2x *bp)
4379 kfree(bp->strm->workspace);
4383 if (bp->gunzip_buf) {
4384 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
4385 bp->gunzip_mapping);
4386 bp->gunzip_buf = NULL;
4390 static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
4394 /* check gzip header */
4395 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
4396 BNX2X_ERR("Bad gzip header\n");
4404 if (zbuf[3] & FNAME)
4405 while ((zbuf[n++] != 0) && (n < len));
4407 bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
4408 bp->strm->avail_in = len - n;
4409 bp->strm->next_out = bp->gunzip_buf;
4410 bp->strm->avail_out = FW_BUF_SIZE;
4412 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
4416 rc = zlib_inflate(bp->strm, Z_FINISH);
4417 if ((rc != Z_OK) && (rc != Z_STREAM_END))
4418 netdev_err(bp->dev, "Firmware decompression error: %s\n",
4421 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
4422 if (bp->gunzip_outlen & 0x3)
4423 netdev_err(bp->dev, "Firmware decompression error:"
4424 " gunzip_outlen (%d) not aligned\n",
4426 bp->gunzip_outlen >>= 2;
4428 zlib_inflateEnd(bp->strm);
4430 if (rc == Z_STREAM_END)
4436 /* nic load/unload */
4439 * General service functions
4442 /* send a NIG loopback debug packet */
4443 static void bnx2x_lb_pckt(struct bnx2x *bp)
4447 /* Ethernet source and destination addresses */
4448 wb_write[0] = 0x55555555;
4449 wb_write[1] = 0x55555555;
4450 wb_write[2] = 0x20; /* SOP */
4451 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
4453 /* NON-IP protocol */
4454 wb_write[0] = 0x09000000;
4455 wb_write[1] = 0x55555555;
4456 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
4457 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
4460 /* some of the internal memories
4461 * are not directly readable from the driver
4462 * to test them we send debug packets
4464 static int bnx2x_int_mem_test(struct bnx2x *bp)
4470 if (CHIP_REV_IS_FPGA(bp))
4472 else if (CHIP_REV_IS_EMUL(bp))
4477 /* Disable inputs of parser neighbor blocks */
4478 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4479 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4480 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
4481 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
4483 /* Write 0 to parser credits for CFC search request */
4484 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4486 /* send Ethernet packet */
4489 /* TODO do i reset NIG statistic? */
4490 /* Wait until NIG register shows 1 packet of size 0x10 */
4491 count = 1000 * factor;
4494 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4495 val = *bnx2x_sp(bp, wb_data[0]);
4503 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
4507 /* Wait until PRS register shows 1 packet */
4508 count = 1000 * factor;
4510 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
4518 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
4522 /* Reset and init BRB, PRS */
4523 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
4525 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
4527 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
4528 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
4530 DP(NETIF_MSG_HW, "part2\n");
4532 /* Disable inputs of parser neighbor blocks */
4533 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4534 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4535 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
4536 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
4538 /* Write 0 to parser credits for CFC search request */
4539 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4541 /* send 10 Ethernet packets */
4542 for (i = 0; i < 10; i++)
4545 /* Wait until NIG register shows 10 + 1
4546 packets of size 11*0x10 = 0xb0 */
4547 count = 1000 * factor;
4550 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4551 val = *bnx2x_sp(bp, wb_data[0]);
4559 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
4563 /* Wait until PRS register shows 2 packets */
4564 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
4566 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
4568 /* Write 1 to parser credits for CFC search request */
4569 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
4571 /* Wait until PRS register shows 3 packets */
4572 msleep(10 * factor);
4573 /* Wait until NIG register shows 1 packet of size 0x10 */
4574 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
4576 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
4578 /* clear NIG EOP FIFO */
4579 for (i = 0; i < 11; i++)
4580 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
4581 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
4583 BNX2X_ERR("clear of NIG failed\n");
4587 /* Reset and init BRB, PRS, NIG */
4588 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
4590 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
4592 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
4593 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
4596 REG_WR(bp, PRS_REG_NIC_MODE, 1);
4599 /* Enable inputs of parser neighbor blocks */
4600 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
4601 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
4602 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
4603 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
4605 DP(NETIF_MSG_HW, "done\n");
4610 static void enable_blocks_attention(struct bnx2x *bp)
4612 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
4614 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0x40);
4616 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
4617 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
4618 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
4620 * mask read length error interrupts in brb for parser
4621 * (parsing unit and 'checksum and crc' unit)
4622 * these errors are legal (PU reads fixed length and CAC can cause
4623 * read length error on truncated packets)
4625 REG_WR(bp, BRB1_REG_BRB1_INT_MASK, 0xFC00);
4626 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
4627 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
4628 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
4629 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
4630 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
4631 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
4632 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
4633 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
4634 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
4635 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
4636 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
4637 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
4638 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
4639 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
4640 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
4641 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
4642 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
4643 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
4645 if (CHIP_REV_IS_FPGA(bp))
4646 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
4647 else if (CHIP_IS_E2(bp))
4648 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0,
4649 (PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF
4650 | PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT
4651 | PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN
4652 | PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED
4653 | PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED));
4655 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
4656 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
4657 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
4658 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
4659 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
4660 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
4661 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
4662 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
4663 /* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
4664 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */
4667 static const struct {
4670 } bnx2x_parity_mask[] = {
4671 {PXP_REG_PXP_PRTY_MASK, 0x3ffffff},
4672 {PXP2_REG_PXP2_PRTY_MASK_0, 0xffffffff},
4673 {PXP2_REG_PXP2_PRTY_MASK_1, 0x7f},
4674 {HC_REG_HC_PRTY_MASK, 0x7},
4675 {MISC_REG_MISC_PRTY_MASK, 0x1},
4676 {QM_REG_QM_PRTY_MASK, 0x0},
4677 {DORQ_REG_DORQ_PRTY_MASK, 0x0},
4678 {GRCBASE_UPB + PB_REG_PB_PRTY_MASK, 0x0},
4679 {GRCBASE_XPB + PB_REG_PB_PRTY_MASK, 0x0},
4680 {SRC_REG_SRC_PRTY_MASK, 0x4}, /* bit 2 */
4681 {CDU_REG_CDU_PRTY_MASK, 0x0},
4682 {CFC_REG_CFC_PRTY_MASK, 0x0},
4683 {DBG_REG_DBG_PRTY_MASK, 0x0},
4684 {DMAE_REG_DMAE_PRTY_MASK, 0x0},
4685 {BRB1_REG_BRB1_PRTY_MASK, 0x0},
4686 {PRS_REG_PRS_PRTY_MASK, (1<<6)},/* bit 6 */
4687 {TSDM_REG_TSDM_PRTY_MASK, 0x18}, /* bit 3,4 */
4688 {CSDM_REG_CSDM_PRTY_MASK, 0x8}, /* bit 3 */
4689 {USDM_REG_USDM_PRTY_MASK, 0x38}, /* bit 3,4,5 */
4690 {XSDM_REG_XSDM_PRTY_MASK, 0x8}, /* bit 3 */
4691 {TSEM_REG_TSEM_PRTY_MASK_0, 0x0},
4692 {TSEM_REG_TSEM_PRTY_MASK_1, 0x0},
4693 {USEM_REG_USEM_PRTY_MASK_0, 0x0},
4694 {USEM_REG_USEM_PRTY_MASK_1, 0x0},
4695 {CSEM_REG_CSEM_PRTY_MASK_0, 0x0},
4696 {CSEM_REG_CSEM_PRTY_MASK_1, 0x0},
4697 {XSEM_REG_XSEM_PRTY_MASK_0, 0x0},
4698 {XSEM_REG_XSEM_PRTY_MASK_1, 0x0}
4701 static void enable_blocks_parity(struct bnx2x *bp)
4705 for (i = 0; i < ARRAY_SIZE(bnx2x_parity_mask); i++)
4706 REG_WR(bp, bnx2x_parity_mask[i].addr,
4707 bnx2x_parity_mask[i].mask);
4711 static void bnx2x_reset_common(struct bnx2x *bp)
4714 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
4716 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
4719 static void bnx2x_init_pxp(struct bnx2x *bp)
4722 int r_order, w_order;
4724 pci_read_config_word(bp->pdev,
4725 bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
4726 DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
4727 w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
4729 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
4731 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
4735 bnx2x_init_pxp_arb(bp, r_order, w_order);
4738 static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
4748 val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
4749 SHARED_HW_CFG_FAN_FAILURE_MASK;
4751 if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
4755 * The fan failure mechanism is usually related to the PHY type since
4756 * the power consumption of the board is affected by the PHY. Currently,
4757 * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
4759 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
4760 for (port = PORT_0; port < PORT_MAX; port++) {
4762 bnx2x_fan_failure_det_req(
4764 bp->common.shmem_base,
4765 bp->common.shmem2_base,
4769 DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
4771 if (is_required == 0)
4774 /* Fan failure is indicated by SPIO 5 */
4775 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
4776 MISC_REGISTERS_SPIO_INPUT_HI_Z);
4778 /* set to active low mode */
4779 val = REG_RD(bp, MISC_REG_SPIO_INT);
4780 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
4781 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
4782 REG_WR(bp, MISC_REG_SPIO_INT, val);
4784 /* enable interrupt to signal the IGU */
4785 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
4786 val |= (1 << MISC_REGISTERS_SPIO_5);
4787 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
4790 static void bnx2x_pretend_func(struct bnx2x *bp, u8 pretend_func_num)
4796 if (CHIP_IS_E1H(bp) && (pretend_func_num >= E1H_FUNC_MAX))
4799 switch (BP_ABS_FUNC(bp)) {
4801 offset = PXP2_REG_PGL_PRETEND_FUNC_F0;
4804 offset = PXP2_REG_PGL_PRETEND_FUNC_F1;
4807 offset = PXP2_REG_PGL_PRETEND_FUNC_F2;
4810 offset = PXP2_REG_PGL_PRETEND_FUNC_F3;
4813 offset = PXP2_REG_PGL_PRETEND_FUNC_F4;
4816 offset = PXP2_REG_PGL_PRETEND_FUNC_F5;
4819 offset = PXP2_REG_PGL_PRETEND_FUNC_F6;
4822 offset = PXP2_REG_PGL_PRETEND_FUNC_F7;
4828 REG_WR(bp, offset, pretend_func_num);
4830 DP(NETIF_MSG_HW, "Pretending to func %d\n", pretend_func_num);
4833 static void bnx2x_pf_disable(struct bnx2x *bp)
4835 u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
4836 val &= ~IGU_PF_CONF_FUNC_EN;
4838 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
4839 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
4840 REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 0);
4843 static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code)
4847 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_ABS_FUNC(bp));
4849 bnx2x_reset_common(bp);
4850 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
4851 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
4853 bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
4854 if (!CHIP_IS_E1(bp))
4855 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_MF(bp));
4857 if (CHIP_IS_E2(bp)) {
4861 * 4-port mode or 2-port mode we need to turn of master-enable
4862 * for everyone, after that, turn it back on for self.
4863 * so, we disregard multi-function or not, and always disable
4864 * for all functions on the given path, this means 0,2,4,6 for
4865 * path 0 and 1,3,5,7 for path 1
4867 for (fid = BP_PATH(bp); fid < E2_FUNC_MAX*2; fid += 2) {
4868 if (fid == BP_ABS_FUNC(bp)) {
4870 PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER,
4875 bnx2x_pretend_func(bp, fid);
4876 /* clear pf enable */
4877 bnx2x_pf_disable(bp);
4878 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
4882 bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
4883 if (CHIP_IS_E1(bp)) {
4884 /* enable HW interrupt from PXP on USDM overflow
4885 bit 16 on INT_MASK_0 */
4886 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
4889 bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
4893 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
4894 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
4895 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
4896 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
4897 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
4898 /* make sure this value is 0 */
4899 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
4901 /* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
4902 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
4903 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
4904 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
4905 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
4908 bnx2x_ilt_init_page_size(bp, INITOP_SET);
4910 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
4911 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
4913 /* let the HW do it's magic ... */
4915 /* finish PXP init */
4916 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
4918 BNX2X_ERR("PXP2 CFG failed\n");
4921 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
4923 BNX2X_ERR("PXP2 RD_INIT failed\n");
4927 /* Timers bug workaround E2 only. We need to set the entire ILT to
4928 * have entries with value "0" and valid bit on.
4929 * This needs to be done by the first PF that is loaded in a path
4930 * (i.e. common phase)
4932 if (CHIP_IS_E2(bp)) {
4933 struct ilt_client_info ilt_cli;
4934 struct bnx2x_ilt ilt;
4935 memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
4936 memset(&ilt, 0, sizeof(struct bnx2x_ilt));
4938 /* initalize dummy TM client */
4940 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
4941 ilt_cli.client_num = ILT_CLIENT_TM;
4943 /* Step 1: set zeroes to all ilt page entries with valid bit on
4944 * Step 2: set the timers first/last ilt entry to point
4945 * to the entire range to prevent ILT range error for 3rd/4th
4946 * vnic (this code assumes existance of the vnic)
4948 * both steps performed by call to bnx2x_ilt_client_init_op()
4949 * with dummy TM client
4951 * we must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT
4952 * and his brother are split registers
4954 bnx2x_pretend_func(bp, (BP_PATH(bp) + 6));
4955 bnx2x_ilt_client_init_op_ilt(bp, &ilt, &ilt_cli, INITOP_CLEAR);
4956 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
4958 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN, BNX2X_PXP_DRAM_ALIGN);
4959 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_RD, BNX2X_PXP_DRAM_ALIGN);
4960 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1);
4964 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
4965 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
4967 if (CHIP_IS_E2(bp)) {
4968 int factor = CHIP_REV_IS_EMUL(bp) ? 1000 :
4969 (CHIP_REV_IS_FPGA(bp) ? 400 : 0);
4970 bnx2x_init_block(bp, PGLUE_B_BLOCK, COMMON_STAGE);
4972 bnx2x_init_block(bp, ATC_BLOCK, COMMON_STAGE);
4974 /* let the HW do it's magic ... */
4977 val = REG_RD(bp, ATC_REG_ATC_INIT_DONE);
4978 } while (factor-- && (val != 1));
4981 BNX2X_ERR("ATC_INIT failed\n");
4986 bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
4988 /* clean the DMAE memory */
4990 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
4992 bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
4993 bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
4994 bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
4995 bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
4997 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
4998 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
4999 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5000 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5002 bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
5004 if (CHIP_MODE_IS_4_PORT(bp))
5005 bnx2x_init_block(bp, QM_4PORT_BLOCK, COMMON_STAGE);
5007 /* QM queues pointers table */
5008 bnx2x_qm_init_ptr_table(bp, bp->qm_cid_count, INITOP_SET);
5010 /* soft reset pulse */
5011 REG_WR(bp, QM_REG_SOFT_RESET, 1);
5012 REG_WR(bp, QM_REG_SOFT_RESET, 0);
5015 bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
5018 bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
5019 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BNX2X_DB_SHIFT);
5021 if (!CHIP_REV_IS_SLOW(bp)) {
5022 /* enable hw interrupt from doorbell Q */
5023 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5026 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5027 if (CHIP_MODE_IS_4_PORT(bp)) {
5028 REG_WR(bp, BRB1_REG_FULL_LB_XOFF_THRESHOLD, 248);
5029 REG_WR(bp, BRB1_REG_FULL_LB_XON_THRESHOLD, 328);
5032 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5033 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
5036 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5038 if (!CHIP_IS_E1(bp))
5039 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_MF(bp));
5041 if (CHIP_IS_E2(bp)) {
5042 /* Bit-map indicating which L2 hdrs may appear after the
5043 basic Ethernet header */
5044 int has_ovlan = IS_MF(bp);
5045 REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, (has_ovlan ? 7 : 6));
5046 REG_WR(bp, PRS_REG_MUST_HAVE_HDRS, (has_ovlan ? 1 : 0));
5049 bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
5050 bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
5051 bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
5052 bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
5054 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
5055 bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
5056 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
5057 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
5059 bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
5060 bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
5061 bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
5062 bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
5064 if (CHIP_MODE_IS_4_PORT(bp))
5065 bnx2x_init_block(bp, XSEM_4PORT_BLOCK, COMMON_STAGE);
5068 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5070 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5073 bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
5074 bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
5075 bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
5077 if (CHIP_IS_E2(bp)) {
5078 int has_ovlan = IS_MF(bp);
5079 REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, (has_ovlan ? 7 : 6));
5080 REG_WR(bp, PBF_REG_MUST_HAVE_HDRS, (has_ovlan ? 1 : 0));
5083 REG_WR(bp, SRC_REG_SOFT_RST, 1);
5084 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4)
5085 REG_WR(bp, i, random32());
5087 bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
5089 REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
5090 REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
5091 REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
5092 REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
5093 REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
5094 REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
5095 REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
5096 REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
5097 REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
5098 REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
5100 REG_WR(bp, SRC_REG_SOFT_RST, 0);
5102 if (sizeof(union cdu_context) != 1024)
5103 /* we currently assume that a context is 1024 bytes */
5104 dev_alert(&bp->pdev->dev, "please adjust the size "
5105 "of cdu_context(%ld)\n",
5106 (long)sizeof(union cdu_context));
5108 bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
5109 val = (4 << 24) + (0 << 12) + 1024;
5110 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5112 bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
5113 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
5114 /* enable context validation interrupt from CFC */
5115 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5117 /* set the thresholds to prevent CFC/CDU race */
5118 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
5120 bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
5122 if (CHIP_IS_E2(bp) && BP_NOMCP(bp))
5123 REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x36);
5125 bnx2x_init_block(bp, IGU_BLOCK, COMMON_STAGE);
5126 bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
5128 bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
5129 /* Reset PCIE errors for debug */
5130 REG_WR(bp, 0x2814, 0xffffffff);
5131 REG_WR(bp, 0x3820, 0xffffffff);
5133 if (CHIP_IS_E2(bp)) {
5134 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_CONTROL_5,
5135 (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 |
5136 PXPCS_TL_CONTROL_5_ERR_UNSPPORT));
5137 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT,
5138 (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 |
5139 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 |
5140 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2));
5141 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT,
5142 (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 |
5143 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 |
5144 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5));
5147 bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
5148 bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
5149 bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
5150 bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
5152 bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
5153 if (!CHIP_IS_E1(bp)) {
5154 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_MF(bp));
5155 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_MF(bp));
5157 if (CHIP_IS_E2(bp)) {
5158 /* Bit-map indicating which L2 hdrs may appear after the
5159 basic Ethernet header */
5160 REG_WR(bp, NIG_REG_P0_HDRS_AFTER_BASIC, (IS_MF(bp) ? 7 : 6));
5163 if (CHIP_REV_IS_SLOW(bp))
5166 /* finish CFC init */
5167 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5169 BNX2X_ERR("CFC LL_INIT failed\n");
5172 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5174 BNX2X_ERR("CFC AC_INIT failed\n");
5177 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5179 BNX2X_ERR("CFC CAM_INIT failed\n");
5182 REG_WR(bp, CFC_REG_DEBUG0, 0);
5184 if (CHIP_IS_E1(bp)) {
5185 /* read NIG statistic
5186 to see if this is our first up since powerup */
5187 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5188 val = *bnx2x_sp(bp, wb_data[0]);
5190 /* do internal memory self test */
5191 if ((val == 0) && bnx2x_int_mem_test(bp)) {
5192 BNX2X_ERR("internal mem self test failed\n");
5197 bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
5198 bp->common.shmem_base,
5199 bp->common.shmem2_base);
5201 bnx2x_setup_fan_failure_detection(bp);
5203 /* clear PXP2 attentions */
5204 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
5206 enable_blocks_attention(bp);
5207 if (CHIP_PARITY_SUPPORTED(bp))
5208 enable_blocks_parity(bp);
5210 if (!BP_NOMCP(bp)) {
5211 /* In E2 2-PORT mode, same ext phy is used for the two paths */
5212 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
5214 u32 shmem_base[2], shmem2_base[2];
5215 shmem_base[0] = bp->common.shmem_base;
5216 shmem2_base[0] = bp->common.shmem2_base;
5217 if (CHIP_IS_E2(bp)) {
5219 SHMEM2_RD(bp, other_shmem_base_addr);
5221 SHMEM2_RD(bp, other_shmem2_base_addr);
5223 bnx2x_acquire_phy_lock(bp);
5224 bnx2x_common_init_phy(bp, shmem_base, shmem2_base,
5225 bp->common.chip_id);
5226 bnx2x_release_phy_lock(bp);
5229 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
5234 static int bnx2x_init_hw_port(struct bnx2x *bp)
5236 int port = BP_PORT(bp);
5237 int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
5241 DP(BNX2X_MSG_MCP, "starting port init port %d\n", port);
5243 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
5245 bnx2x_init_block(bp, PXP_BLOCK, init_stage);
5246 bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
5248 /* Timers bug workaround: disables the pf_master bit in pglue at
5249 * common phase, we need to enable it here before any dmae access are
5250 * attempted. Therefore we manually added the enable-master to the
5251 * port phase (it also happens in the function phase)
5254 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
5256 bnx2x_init_block(bp, TCM_BLOCK, init_stage);
5257 bnx2x_init_block(bp, UCM_BLOCK, init_stage);
5258 bnx2x_init_block(bp, CCM_BLOCK, init_stage);
5259 bnx2x_init_block(bp, XCM_BLOCK, init_stage);
5261 /* QM cid (connection) count */
5262 bnx2x_qm_init_cid_count(bp, bp->qm_cid_count, INITOP_SET);
5265 bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
5266 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
5267 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
5270 bnx2x_init_block(bp, DQ_BLOCK, init_stage);
5272 if (CHIP_MODE_IS_4_PORT(bp))
5273 bnx2x_init_block(bp, QM_4PORT_BLOCK, init_stage);
5275 if (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) {
5276 bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
5277 if (CHIP_REV_IS_SLOW(bp) && CHIP_IS_E1(bp)) {
5278 /* no pause for emulation and FPGA */
5283 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
5284 else if (bp->dev->mtu > 4096) {
5285 if (bp->flags & ONE_PORT_FLAG)
5289 /* (24*1024 + val*4)/256 */
5290 low = 96 + (val/64) +
5291 ((val % 64) ? 1 : 0);
5294 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
5295 high = low + 56; /* 14*1024/256 */
5297 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
5298 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
5301 if (CHIP_MODE_IS_4_PORT(bp)) {
5302 REG_WR(bp, BRB1_REG_PAUSE_0_XOFF_THRESHOLD_0 + port*8, 248);
5303 REG_WR(bp, BRB1_REG_PAUSE_0_XON_THRESHOLD_0 + port*8, 328);
5304 REG_WR(bp, (BP_PORT(bp) ? BRB1_REG_MAC_GUARANTIED_1 :
5305 BRB1_REG_MAC_GUARANTIED_0), 40);
5308 bnx2x_init_block(bp, PRS_BLOCK, init_stage);
5310 bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
5311 bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
5312 bnx2x_init_block(bp, USDM_BLOCK, init_stage);
5313 bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
5315 bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
5316 bnx2x_init_block(bp, USEM_BLOCK, init_stage);
5317 bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
5318 bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
5319 if (CHIP_MODE_IS_4_PORT(bp))
5320 bnx2x_init_block(bp, XSEM_4PORT_BLOCK, init_stage);
5322 bnx2x_init_block(bp, UPB_BLOCK, init_stage);
5323 bnx2x_init_block(bp, XPB_BLOCK, init_stage);
5325 bnx2x_init_block(bp, PBF_BLOCK, init_stage);
5327 if (!CHIP_IS_E2(bp)) {
5328 /* configure PBF to work without PAUSE mtu 9000 */
5329 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
5331 /* update threshold */
5332 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
5333 /* update init credit */
5334 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
5337 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
5339 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
5343 bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
5345 bnx2x_init_block(bp, CDU_BLOCK, init_stage);
5346 bnx2x_init_block(bp, CFC_BLOCK, init_stage);
5348 if (CHIP_IS_E1(bp)) {
5349 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5350 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5352 bnx2x_init_block(bp, HC_BLOCK, init_stage);
5354 bnx2x_init_block(bp, IGU_BLOCK, init_stage);
5356 bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
5357 /* init aeu_mask_attn_func_0/1:
5358 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5359 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5360 * bits 4-7 are used for "per vn group attention" */
5361 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
5362 (IS_MF(bp) ? 0xF7 : 0x7));
5364 bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
5365 bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
5366 bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
5367 bnx2x_init_block(bp, DBU_BLOCK, init_stage);
5368 bnx2x_init_block(bp, DBG_BLOCK, init_stage);
5370 bnx2x_init_block(bp, NIG_BLOCK, init_stage);
5372 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
5374 if (!CHIP_IS_E1(bp)) {
5375 /* 0x2 disable mf_ov, 0x1 enable */
5376 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
5377 (IS_MF(bp) ? 0x1 : 0x2));
5379 if (CHIP_IS_E2(bp)) {
5381 switch (bp->mf_mode) {
5382 case MULTI_FUNCTION_SD:
5385 case MULTI_FUNCTION_SI:
5390 REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_CLS_TYPE :
5391 NIG_REG_LLH0_CLS_TYPE), val);
5394 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
5395 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
5396 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
5400 bnx2x_init_block(bp, MCP_BLOCK, init_stage);
5401 bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
5402 bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
5403 bp->common.shmem_base,
5404 bp->common.shmem2_base);
5405 if (bnx2x_fan_failure_det_req(bp, bp->common.shmem_base,
5406 bp->common.shmem2_base, port)) {
5407 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
5408 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5409 val = REG_RD(bp, reg_addr);
5410 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
5411 REG_WR(bp, reg_addr, val);
5413 bnx2x__link_reset(bp);
5418 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
5423 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
5425 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
5427 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
5430 static inline void bnx2x_igu_clear_sb(struct bnx2x *bp, u8 idu_sb_id)
5432 bnx2x_igu_clear_sb_gen(bp, idu_sb_id, true /*PF*/);
5435 static inline void bnx2x_clear_func_ilt(struct bnx2x *bp, u32 func)
5437 u32 i, base = FUNC_ILT_BASE(func);
5438 for (i = base; i < base + ILT_PER_FUNC; i++)
5439 bnx2x_ilt_wr(bp, i, 0);
5442 static int bnx2x_init_hw_func(struct bnx2x *bp)
5444 int port = BP_PORT(bp);
5445 int func = BP_FUNC(bp);
5446 struct bnx2x_ilt *ilt = BP_ILT(bp);
5451 DP(BNX2X_MSG_MCP, "starting func init func %d\n", func);
5453 /* set MSI reconfigure capability */
5454 if (bp->common.int_block == INT_BLOCK_HC) {
5455 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
5456 val = REG_RD(bp, addr);
5457 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
5458 REG_WR(bp, addr, val);
5462 cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
5464 for (i = 0; i < L2_ILT_LINES(bp); i++) {
5465 ilt->lines[cdu_ilt_start + i].page =
5466 bp->context.vcxt + (ILT_PAGE_CIDS * i);
5467 ilt->lines[cdu_ilt_start + i].page_mapping =
5468 bp->context.cxt_mapping + (CDU_ILT_PAGE_SZ * i);
5469 /* cdu ilt pages are allocated manually so there's no need to
5472 bnx2x_ilt_init_op(bp, INITOP_SET);
5475 bnx2x_src_init_t2(bp, bp->t2, bp->t2_mapping, SRC_CONN_NUM);
5477 /* T1 hash bits value determines the T1 number of entries */
5478 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, SRC_HASH_BITS);
5483 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5484 #endif /* BCM_CNIC */
5486 if (CHIP_IS_E2(bp)) {
5487 u32 pf_conf = IGU_PF_CONF_FUNC_EN;
5489 /* Turn on a single ISR mode in IGU if driver is going to use
5492 if (!(bp->flags & USING_MSIX_FLAG))
5493 pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
5495 * Timers workaround bug: function init part.
5496 * Need to wait 20msec after initializing ILT,
5497 * needed to make sure there are no requests in
5498 * one of the PXP internal queues with "old" ILT addresses
5502 * Master enable - Due to WB DMAE writes performed before this
5503 * register is re-initialized as part of the regular function
5506 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
5507 /* Enable the function in IGU */
5508 REG_WR(bp, IGU_REG_PF_CONFIGURATION, pf_conf);
5513 bnx2x_init_block(bp, PGLUE_B_BLOCK, FUNC0_STAGE + func);
5516 REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func);
5518 bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
5519 bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
5520 bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
5521 bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
5522 bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
5523 bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
5524 bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
5525 bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
5526 bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);
5528 if (CHIP_IS_E2(bp)) {
5529 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_PATH_ID_OFFSET,
5531 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_PATH_ID_OFFSET,
5535 if (CHIP_MODE_IS_4_PORT(bp))
5536 bnx2x_init_block(bp, XSEM_4PORT_BLOCK, FUNC0_STAGE + func);
5539 REG_WR(bp, QM_REG_PF_EN, 1);
5541 bnx2x_init_block(bp, QM_BLOCK, FUNC0_STAGE + func);
5543 if (CHIP_MODE_IS_4_PORT(bp))
5544 bnx2x_init_block(bp, QM_4PORT_BLOCK, FUNC0_STAGE + func);
5546 bnx2x_init_block(bp, TIMERS_BLOCK, FUNC0_STAGE + func);
5547 bnx2x_init_block(bp, DQ_BLOCK, FUNC0_STAGE + func);
5548 bnx2x_init_block(bp, BRB1_BLOCK, FUNC0_STAGE + func);
5549 bnx2x_init_block(bp, PRS_BLOCK, FUNC0_STAGE + func);
5550 bnx2x_init_block(bp, TSDM_BLOCK, FUNC0_STAGE + func);
5551 bnx2x_init_block(bp, CSDM_BLOCK, FUNC0_STAGE + func);
5552 bnx2x_init_block(bp, USDM_BLOCK, FUNC0_STAGE + func);
5553 bnx2x_init_block(bp, XSDM_BLOCK, FUNC0_STAGE + func);
5554 bnx2x_init_block(bp, UPB_BLOCK, FUNC0_STAGE + func);
5555 bnx2x_init_block(bp, XPB_BLOCK, FUNC0_STAGE + func);
5556 bnx2x_init_block(bp, PBF_BLOCK, FUNC0_STAGE + func);
5558 REG_WR(bp, PBF_REG_DISABLE_PF, 0);
5560 bnx2x_init_block(bp, CDU_BLOCK, FUNC0_STAGE + func);
5562 bnx2x_init_block(bp, CFC_BLOCK, FUNC0_STAGE + func);
5565 REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 1);
5568 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
5569 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->mf_ov);
5572 bnx2x_init_block(bp, MISC_AEU_BLOCK, FUNC0_STAGE + func);
5574 /* HC init per function */
5575 if (bp->common.int_block == INT_BLOCK_HC) {
5576 if (CHIP_IS_E1H(bp)) {
5577 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
5579 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5580 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5582 bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
5585 int num_segs, sb_idx, prod_offset;
5587 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
5589 if (CHIP_IS_E2(bp)) {
5590 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
5591 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
5594 bnx2x_init_block(bp, IGU_BLOCK, FUNC0_STAGE + func);
5596 if (CHIP_IS_E2(bp)) {
5600 * E2 mode: address 0-135 match to the mapping memory;
5601 * 136 - PF0 default prod; 137 - PF1 default prod;
5602 * 138 - PF2 default prod; 139 - PF3 default prod;
5603 * 140 - PF0 attn prod; 141 - PF1 attn prod;
5604 * 142 - PF2 attn prod; 143 - PF3 attn prod;
5607 * E1.5 mode - In backward compatible mode;
5608 * for non default SB; each even line in the memory
5609 * holds the U producer and each odd line hold
5610 * the C producer. The first 128 producers are for
5611 * NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The last 20
5612 * producers are for the DSB for each PF.
5613 * Each PF has five segments: (the order inside each
5614 * segment is PF0; PF1; PF2; PF3) - 128-131 U prods;
5615 * 132-135 C prods; 136-139 X prods; 140-143 T prods;
5616 * 144-147 attn prods;
5618 /* non-default-status-blocks */
5619 num_segs = CHIP_INT_MODE_IS_BC(bp) ?
5620 IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS;
5621 for (sb_idx = 0; sb_idx < bp->igu_sb_cnt; sb_idx++) {
5622 prod_offset = (bp->igu_base_sb + sb_idx) *
5625 for (i = 0; i < num_segs; i++) {
5626 addr = IGU_REG_PROD_CONS_MEMORY +
5627 (prod_offset + i) * 4;
5628 REG_WR(bp, addr, 0);
5630 /* send consumer update with value 0 */
5631 bnx2x_ack_sb(bp, bp->igu_base_sb + sb_idx,
5632 USTORM_ID, 0, IGU_INT_NOP, 1);
5633 bnx2x_igu_clear_sb(bp,
5634 bp->igu_base_sb + sb_idx);
5637 /* default-status-blocks */
5638 num_segs = CHIP_INT_MODE_IS_BC(bp) ?
5639 IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS;
5641 if (CHIP_MODE_IS_4_PORT(bp))
5642 dsb_idx = BP_FUNC(bp);
5644 dsb_idx = BP_E1HVN(bp);
5646 prod_offset = (CHIP_INT_MODE_IS_BC(bp) ?
5647 IGU_BC_BASE_DSB_PROD + dsb_idx :
5648 IGU_NORM_BASE_DSB_PROD + dsb_idx);
5650 for (i = 0; i < (num_segs * E1HVN_MAX);
5652 addr = IGU_REG_PROD_CONS_MEMORY +
5653 (prod_offset + i)*4;
5654 REG_WR(bp, addr, 0);
5656 /* send consumer update with 0 */
5657 if (CHIP_INT_MODE_IS_BC(bp)) {
5658 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5659 USTORM_ID, 0, IGU_INT_NOP, 1);
5660 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5661 CSTORM_ID, 0, IGU_INT_NOP, 1);
5662 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5663 XSTORM_ID, 0, IGU_INT_NOP, 1);
5664 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5665 TSTORM_ID, 0, IGU_INT_NOP, 1);
5666 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5667 ATTENTION_ID, 0, IGU_INT_NOP, 1);
5669 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5670 USTORM_ID, 0, IGU_INT_NOP, 1);
5671 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5672 ATTENTION_ID, 0, IGU_INT_NOP, 1);
5674 bnx2x_igu_clear_sb(bp, bp->igu_dsb_id);
5676 /* !!! these should become driver const once
5677 rf-tool supports split-68 const */
5678 REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
5679 REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
5680 REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
5681 REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
5682 REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
5683 REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);
5687 /* Reset PCIE errors for debug */
5688 REG_WR(bp, 0x2114, 0xffffffff);
5689 REG_WR(bp, 0x2120, 0xffffffff);
5691 bnx2x_init_block(bp, EMAC0_BLOCK, FUNC0_STAGE + func);
5692 bnx2x_init_block(bp, EMAC1_BLOCK, FUNC0_STAGE + func);
5693 bnx2x_init_block(bp, DBU_BLOCK, FUNC0_STAGE + func);
5694 bnx2x_init_block(bp, DBG_BLOCK, FUNC0_STAGE + func);
5695 bnx2x_init_block(bp, MCP_BLOCK, FUNC0_STAGE + func);
5696 bnx2x_init_block(bp, DMAE_BLOCK, FUNC0_STAGE + func);
5698 bnx2x_phy_probe(&bp->link_params);
5703 int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
5707 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
5708 BP_ABS_FUNC(bp), load_code);
5711 mutex_init(&bp->dmae_mutex);
5712 rc = bnx2x_gunzip_init(bp);
5716 switch (load_code) {
5717 case FW_MSG_CODE_DRV_LOAD_COMMON:
5718 case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
5719 rc = bnx2x_init_hw_common(bp, load_code);
5724 case FW_MSG_CODE_DRV_LOAD_PORT:
5725 rc = bnx2x_init_hw_port(bp);
5730 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5731 rc = bnx2x_init_hw_func(bp);
5737 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5741 if (!BP_NOMCP(bp)) {
5742 int mb_idx = BP_FW_MB_IDX(bp);
5744 bp->fw_drv_pulse_wr_seq =
5745 (SHMEM_RD(bp, func_mb[mb_idx].drv_pulse_mb) &
5746 DRV_PULSE_SEQ_MASK);
5747 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
5751 bnx2x_gunzip_end(bp);
5756 void bnx2x_free_mem(struct bnx2x *bp)
5759 #define BNX2X_PCI_FREE(x, y, size) \
5762 dma_free_coherent(&bp->pdev->dev, size, (void *)x, y); \
5768 #define BNX2X_FREE(x) \
5780 for_each_queue(bp, i) {
5783 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk.e2_sb),
5784 bnx2x_fp(bp, i, status_blk_mapping),
5785 sizeof(struct host_hc_status_block_e2));
5787 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk.e1x_sb),
5788 bnx2x_fp(bp, i, status_blk_mapping),
5789 sizeof(struct host_hc_status_block_e1x));
5792 for_each_queue(bp, i) {
5794 /* fastpath rx rings: rx_buf rx_desc rx_comp */
5795 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
5796 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
5797 bnx2x_fp(bp, i, rx_desc_mapping),
5798 sizeof(struct eth_rx_bd) * NUM_RX_BD);
5800 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
5801 bnx2x_fp(bp, i, rx_comp_mapping),
5802 sizeof(struct eth_fast_path_rx_cqe) *
5806 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
5807 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
5808 bnx2x_fp(bp, i, rx_sge_mapping),
5809 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5812 for_each_queue(bp, i) {
5814 /* fastpath tx rings: tx_buf tx_desc */
5815 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
5816 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
5817 bnx2x_fp(bp, i, tx_desc_mapping),
5818 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
5820 /* end of fastpath */
5822 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
5823 sizeof(struct host_sp_status_block));
5825 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
5826 sizeof(struct bnx2x_slowpath));
5828 BNX2X_PCI_FREE(bp->context.vcxt, bp->context.cxt_mapping,
5831 bnx2x_ilt_mem_op(bp, ILT_MEMOP_FREE);
5833 BNX2X_FREE(bp->ilt->lines);
5837 BNX2X_PCI_FREE(bp->cnic_sb.e2_sb, bp->cnic_sb_mapping,
5838 sizeof(struct host_hc_status_block_e2));
5840 BNX2X_PCI_FREE(bp->cnic_sb.e1x_sb, bp->cnic_sb_mapping,
5841 sizeof(struct host_hc_status_block_e1x));
5843 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ);
5846 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
5848 BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping,
5849 BCM_PAGE_SIZE * NUM_EQ_PAGES);
5851 #undef BNX2X_PCI_FREE
5855 static inline void set_sb_shortcuts(struct bnx2x *bp, int index)
5857 union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
5858 if (CHIP_IS_E2(bp)) {
5859 bnx2x_fp(bp, index, sb_index_values) =
5860 (__le16 *)status_blk.e2_sb->sb.index_values;
5861 bnx2x_fp(bp, index, sb_running_index) =
5862 (__le16 *)status_blk.e2_sb->sb.running_index;
5864 bnx2x_fp(bp, index, sb_index_values) =
5865 (__le16 *)status_blk.e1x_sb->sb.index_values;
5866 bnx2x_fp(bp, index, sb_running_index) =
5867 (__le16 *)status_blk.e1x_sb->sb.running_index;
5871 int bnx2x_alloc_mem(struct bnx2x *bp)
5873 #define BNX2X_PCI_ALLOC(x, y, size) \
5875 x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
5877 goto alloc_mem_err; \
5878 memset(x, 0, size); \
5881 #define BNX2X_ALLOC(x, size) \
5883 x = kzalloc(size, GFP_KERNEL); \
5885 goto alloc_mem_err; \
5892 for_each_queue(bp, i) {
5893 union host_hc_status_block *sb = &bnx2x_fp(bp, i, status_blk);
5894 bnx2x_fp(bp, i, bp) = bp;
5897 BNX2X_PCI_ALLOC(sb->e2_sb,
5898 &bnx2x_fp(bp, i, status_blk_mapping),
5899 sizeof(struct host_hc_status_block_e2));
5901 BNX2X_PCI_ALLOC(sb->e1x_sb,
5902 &bnx2x_fp(bp, i, status_blk_mapping),
5903 sizeof(struct host_hc_status_block_e1x));
5905 set_sb_shortcuts(bp, i);
5908 for_each_queue(bp, i) {
5910 /* fastpath rx rings: rx_buf rx_desc rx_comp */
5911 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
5912 sizeof(struct sw_rx_bd) * NUM_RX_BD);
5913 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
5914 &bnx2x_fp(bp, i, rx_desc_mapping),
5915 sizeof(struct eth_rx_bd) * NUM_RX_BD);
5917 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
5918 &bnx2x_fp(bp, i, rx_comp_mapping),
5919 sizeof(struct eth_fast_path_rx_cqe) *
5923 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
5924 sizeof(struct sw_rx_page) * NUM_RX_SGE);
5925 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
5926 &bnx2x_fp(bp, i, rx_sge_mapping),
5927 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5930 for_each_queue(bp, i) {
5932 /* fastpath tx rings: tx_buf tx_desc */
5933 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
5934 sizeof(struct sw_tx_bd) * NUM_TX_BD);
5935 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
5936 &bnx2x_fp(bp, i, tx_desc_mapping),
5937 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
5939 /* end of fastpath */
5943 BNX2X_PCI_ALLOC(bp->cnic_sb.e2_sb, &bp->cnic_sb_mapping,
5944 sizeof(struct host_hc_status_block_e2));
5946 BNX2X_PCI_ALLOC(bp->cnic_sb.e1x_sb, &bp->cnic_sb_mapping,
5947 sizeof(struct host_hc_status_block_e1x));
5949 /* allocate searcher T2 table */
5950 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ);
5954 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
5955 sizeof(struct host_sp_status_block));
5957 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
5958 sizeof(struct bnx2x_slowpath));
5960 bp->context.size = sizeof(union cdu_context) * bp->l2_cid_count;
5962 BNX2X_PCI_ALLOC(bp->context.vcxt, &bp->context.cxt_mapping,
5965 BNX2X_ALLOC(bp->ilt->lines, sizeof(struct ilt_line) * ILT_MAX_LINES);
5967 if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC))
5970 /* Slow path ring */
5971 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
5974 BNX2X_PCI_ALLOC(bp->eq_ring, &bp->eq_mapping,
5975 BCM_PAGE_SIZE * NUM_EQ_PAGES);
5982 #undef BNX2X_PCI_ALLOC
5987 * Init service functions
5989 int bnx2x_func_start(struct bnx2x *bp)
5991 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0, 0, 0, 1);
5993 /* Wait for completion */
5994 return bnx2x_wait_ramrod(bp, BNX2X_STATE_FUNC_STARTED, 0, &(bp->state),
5995 WAIT_RAMROD_COMMON);
5998 int bnx2x_func_stop(struct bnx2x *bp)
6000 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0, 0, 1);
6002 /* Wait for completion */
6003 return bnx2x_wait_ramrod(bp, BNX2X_STATE_CLOSING_WAIT4_UNLOAD,
6004 0, &(bp->state), WAIT_RAMROD_COMMON);
6008 * Sets a MAC in a CAM for a few L2 Clients for E1x chips
6010 * @param bp driver descriptor
6011 * @param set set or clear an entry (1 or 0)
6012 * @param mac pointer to a buffer containing a MAC
6013 * @param cl_bit_vec bit vector of clients to register a MAC for
6014 * @param cam_offset offset in a CAM to use
6015 * @param is_bcast is the set MAC a broadcast address (for E1 only)
6017 static void bnx2x_set_mac_addr_gen(struct bnx2x *bp, int set, u8 *mac,
6018 u32 cl_bit_vec, u8 cam_offset,
6021 struct mac_configuration_cmd *config =
6022 (struct mac_configuration_cmd *)bnx2x_sp(bp, mac_config);
6023 int ramrod_flags = WAIT_RAMROD_COMMON;
6025 bp->set_mac_pending = 1;
6028 config->hdr.length = 1;
6029 config->hdr.offset = cam_offset;
6030 config->hdr.client_id = 0xff;
6031 config->hdr.reserved1 = 0;
6034 config->config_table[0].msb_mac_addr =
6035 swab16(*(u16 *)&mac[0]);
6036 config->config_table[0].middle_mac_addr =
6037 swab16(*(u16 *)&mac[2]);
6038 config->config_table[0].lsb_mac_addr =
6039 swab16(*(u16 *)&mac[4]);
6040 config->config_table[0].clients_bit_vector =
6041 cpu_to_le32(cl_bit_vec);
6042 config->config_table[0].vlan_id = 0;
6043 config->config_table[0].pf_id = BP_FUNC(bp);
6045 SET_FLAG(config->config_table[0].flags,
6046 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6047 T_ETH_MAC_COMMAND_SET);
6049 SET_FLAG(config->config_table[0].flags,
6050 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6051 T_ETH_MAC_COMMAND_INVALIDATE);
6054 SET_FLAG(config->config_table[0].flags,
6055 MAC_CONFIGURATION_ENTRY_BROADCAST, 1);
6057 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) PF_ID %d CLID mask %d\n",
6058 (set ? "setting" : "clearing"),
6059 config->config_table[0].msb_mac_addr,
6060 config->config_table[0].middle_mac_addr,
6061 config->config_table[0].lsb_mac_addr, BP_FUNC(bp), cl_bit_vec);
6063 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
6064 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6065 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 1);
6067 /* Wait for a completion */
6068 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, ramrod_flags);
6071 int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6072 int *state_p, int flags)
6074 /* can take a while if any port is running */
6076 u8 poll = flags & WAIT_RAMROD_POLL;
6077 u8 common = flags & WAIT_RAMROD_COMMON;
6079 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
6080 poll ? "polling" : "waiting", state, idx);
6088 bnx2x_rx_int(bp->fp, 10);
6089 /* if index is different from 0
6090 * the reply for some commands will
6091 * be on the non default queue
6094 bnx2x_rx_int(&bp->fp[idx], 10);
6098 mb(); /* state is changed by bnx2x_sp_event() */
6099 if (*state_p == state) {
6100 #ifdef BNX2X_STOP_ON_ERROR
6101 DP(NETIF_MSG_IFUP, "exit (cnt %d)\n", 5000 - cnt);
6113 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6114 poll ? "polling" : "waiting", state, idx);
6115 #ifdef BNX2X_STOP_ON_ERROR
6122 u8 bnx2x_e1h_cam_offset(struct bnx2x *bp, u8 rel_offset)
6124 if (CHIP_IS_E1H(bp))
6125 return E1H_FUNC_MAX * rel_offset + BP_FUNC(bp);
6126 else if (CHIP_MODE_IS_4_PORT(bp))
6127 return BP_FUNC(bp) * 32 + rel_offset;
6129 return BP_VN(bp) * 32 + rel_offset;
6132 void bnx2x_set_eth_mac(struct bnx2x *bp, int set)
6134 u8 cam_offset = (CHIP_IS_E1(bp) ? (BP_PORT(bp) ? 32 : 0) :
6135 bnx2x_e1h_cam_offset(bp, CAM_ETH_LINE));
6137 /* networking MAC */
6138 bnx2x_set_mac_addr_gen(bp, set, bp->dev->dev_addr,
6139 (1 << bp->fp->cl_id), cam_offset , 0);
6141 if (CHIP_IS_E1(bp)) {
6143 u8 bcast[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
6144 bnx2x_set_mac_addr_gen(bp, set, bcast, 0, cam_offset + 1, 1);
6147 static void bnx2x_set_e1_mc_list(struct bnx2x *bp, u8 offset)
6150 struct net_device *dev = bp->dev;
6151 struct netdev_hw_addr *ha;
6152 struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
6153 dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);
6155 netdev_for_each_mc_addr(ha, dev) {
6157 config_cmd->config_table[i].msb_mac_addr =
6158 swab16(*(u16 *)&bnx2x_mc_addr(ha)[0]);
6159 config_cmd->config_table[i].middle_mac_addr =
6160 swab16(*(u16 *)&bnx2x_mc_addr(ha)[2]);
6161 config_cmd->config_table[i].lsb_mac_addr =
6162 swab16(*(u16 *)&bnx2x_mc_addr(ha)[4]);
6164 config_cmd->config_table[i].vlan_id = 0;
6165 config_cmd->config_table[i].pf_id = BP_FUNC(bp);
6166 config_cmd->config_table[i].clients_bit_vector =
6167 cpu_to_le32(1 << BP_L_ID(bp));
6169 SET_FLAG(config_cmd->config_table[i].flags,
6170 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6171 T_ETH_MAC_COMMAND_SET);
6174 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
6175 config_cmd->config_table[i].msb_mac_addr,
6176 config_cmd->config_table[i].middle_mac_addr,
6177 config_cmd->config_table[i].lsb_mac_addr);
6180 old = config_cmd->hdr.length;
6182 for (; i < old; i++) {
6183 if (CAM_IS_INVALID(config_cmd->
6185 /* already invalidated */
6189 SET_FLAG(config_cmd->config_table[i].flags,
6190 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6191 T_ETH_MAC_COMMAND_INVALIDATE);
6195 config_cmd->hdr.length = i;
6196 config_cmd->hdr.offset = offset;
6197 config_cmd->hdr.client_id = 0xff;
6198 config_cmd->hdr.reserved1 = 0;
6200 bp->set_mac_pending = 1;
6203 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
6204 U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
6206 static void bnx2x_invlidate_e1_mc_list(struct bnx2x *bp)
6209 struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
6210 dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);
6211 int ramrod_flags = WAIT_RAMROD_COMMON;
6213 bp->set_mac_pending = 1;
6216 for (i = 0; i < config_cmd->hdr.length; i++)
6217 SET_FLAG(config_cmd->config_table[i].flags,
6218 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6219 T_ETH_MAC_COMMAND_INVALIDATE);
6221 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
6222 U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
6224 /* Wait for a completion */
6225 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending,
6232 * Set iSCSI MAC(s) at the next enties in the CAM after the ETH
6233 * MAC(s). This function will wait until the ramdord completion
6236 * @param bp driver handle
6237 * @param set set or clear the CAM entry
6239 * @return 0 if cussess, -ENODEV if ramrod doesn't return.
6241 int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
6243 u8 cam_offset = (CHIP_IS_E1(bp) ? ((BP_PORT(bp) ? 32 : 0) + 2) :
6244 bnx2x_e1h_cam_offset(bp, CAM_ISCSI_ETH_LINE));
6245 u32 iscsi_l2_cl_id = BNX2X_ISCSI_ETH_CL_ID;
6246 u32 cl_bit_vec = (1 << iscsi_l2_cl_id);
6248 /* Send a SET_MAC ramrod */
6249 bnx2x_set_mac_addr_gen(bp, set, bp->iscsi_mac, cl_bit_vec,
6255 static void bnx2x_fill_cl_init_data(struct bnx2x *bp,
6256 struct bnx2x_client_init_params *params,
6258 struct client_init_ramrod_data *data)
6260 /* Clear the buffer */
6261 memset(data, 0, sizeof(*data));
6264 data->general.client_id = params->rxq_params.cl_id;
6265 data->general.statistics_counter_id = params->rxq_params.stat_id;
6266 data->general.statistics_en_flg =
6267 (params->rxq_params.flags & QUEUE_FLG_STATS) ? 1 : 0;
6268 data->general.activate_flg = activate;
6269 data->general.sp_client_id = params->rxq_params.spcl_id;
6272 data->rx.tpa_en_flg =
6273 (params->rxq_params.flags & QUEUE_FLG_TPA) ? 1 : 0;
6274 data->rx.vmqueue_mode_en_flg = 0;
6275 data->rx.cache_line_alignment_log_size =
6276 params->rxq_params.cache_line_log;
6277 data->rx.enable_dynamic_hc =
6278 (params->rxq_params.flags & QUEUE_FLG_DHC) ? 1 : 0;
6279 data->rx.max_sges_for_packet = params->rxq_params.max_sges_pkt;
6280 data->rx.client_qzone_id = params->rxq_params.cl_qzone_id;
6281 data->rx.max_agg_size = params->rxq_params.tpa_agg_sz;
6283 /* We don't set drop flags */
6284 data->rx.drop_ip_cs_err_flg = 0;
6285 data->rx.drop_tcp_cs_err_flg = 0;
6286 data->rx.drop_ttl0_flg = 0;
6287 data->rx.drop_udp_cs_err_flg = 0;
6289 data->rx.inner_vlan_removal_enable_flg =
6290 (params->rxq_params.flags & QUEUE_FLG_VLAN) ? 1 : 0;
6291 data->rx.outer_vlan_removal_enable_flg =
6292 (params->rxq_params.flags & QUEUE_FLG_OV) ? 1 : 0;
6293 data->rx.status_block_id = params->rxq_params.fw_sb_id;
6294 data->rx.rx_sb_index_number = params->rxq_params.sb_cq_index;
6295 data->rx.bd_buff_size = cpu_to_le16(params->rxq_params.buf_sz);
6296 data->rx.sge_buff_size = cpu_to_le16(params->rxq_params.sge_buf_sz);
6297 data->rx.mtu = cpu_to_le16(params->rxq_params.mtu);
6298 data->rx.bd_page_base.lo =
6299 cpu_to_le32(U64_LO(params->rxq_params.dscr_map));
6300 data->rx.bd_page_base.hi =
6301 cpu_to_le32(U64_HI(params->rxq_params.dscr_map));
6302 data->rx.sge_page_base.lo =
6303 cpu_to_le32(U64_LO(params->rxq_params.sge_map));
6304 data->rx.sge_page_base.hi =
6305 cpu_to_le32(U64_HI(params->rxq_params.sge_map));
6306 data->rx.cqe_page_base.lo =
6307 cpu_to_le32(U64_LO(params->rxq_params.rcq_map));
6308 data->rx.cqe_page_base.hi =
6309 cpu_to_le32(U64_HI(params->rxq_params.rcq_map));
6310 data->rx.is_leading_rss =
6311 (params->ramrod_params.flags & CLIENT_IS_LEADING_RSS) ? 1 : 0;
6312 data->rx.is_approx_mcast = data->rx.is_leading_rss;
6315 data->tx.enforce_security_flg = 0; /* VF specific */
6316 data->tx.tx_status_block_id = params->txq_params.fw_sb_id;
6317 data->tx.tx_sb_index_number = params->txq_params.sb_cq_index;
6318 data->tx.mtu = 0; /* VF specific */
6319 data->tx.tx_bd_page_base.lo =
6320 cpu_to_le32(U64_LO(params->txq_params.dscr_map));
6321 data->tx.tx_bd_page_base.hi =
6322 cpu_to_le32(U64_HI(params->txq_params.dscr_map));
6324 /* flow control data */
6325 data->fc.cqe_pause_thr_low = cpu_to_le16(params->pause.rcq_th_lo);
6326 data->fc.cqe_pause_thr_high = cpu_to_le16(params->pause.rcq_th_hi);
6327 data->fc.bd_pause_thr_low = cpu_to_le16(params->pause.bd_th_lo);
6328 data->fc.bd_pause_thr_high = cpu_to_le16(params->pause.bd_th_hi);
6329 data->fc.sge_pause_thr_low = cpu_to_le16(params->pause.sge_th_lo);
6330 data->fc.sge_pause_thr_high = cpu_to_le16(params->pause.sge_th_hi);
6331 data->fc.rx_cos_mask = cpu_to_le16(params->pause.pri_map);
6333 data->fc.safc_group_num = params->txq_params.cos;
6334 data->fc.safc_group_en_flg =
6335 (params->txq_params.flags & QUEUE_FLG_COS) ? 1 : 0;
6336 data->fc.traffic_type = LLFC_TRAFFIC_TYPE_NW;
6339 static inline void bnx2x_set_ctx_validation(struct eth_context *cxt, u32 cid)
6341 /* ustorm cxt validation */
6342 cxt->ustorm_ag_context.cdu_usage =
6343 CDU_RSRVD_VALUE_TYPE_A(cid, CDU_REGION_NUMBER_UCM_AG,
6344 ETH_CONNECTION_TYPE);
6345 /* xcontext validation */
6346 cxt->xstorm_ag_context.cdu_reserved =
6347 CDU_RSRVD_VALUE_TYPE_A(cid, CDU_REGION_NUMBER_XCM_AG,
6348 ETH_CONNECTION_TYPE);
6351 int bnx2x_setup_fw_client(struct bnx2x *bp,
6352 struct bnx2x_client_init_params *params,
6354 struct client_init_ramrod_data *data,
6355 dma_addr_t data_mapping)
6358 int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
6359 int ramrod_flags = 0, rc;
6361 /* HC and context validation values */
6362 hc_usec = params->txq_params.hc_rate ?
6363 1000000 / params->txq_params.hc_rate : 0;
6364 bnx2x_update_coalesce_sb_index(bp,
6365 params->txq_params.fw_sb_id,
6366 params->txq_params.sb_cq_index,
6367 !(params->txq_params.flags & QUEUE_FLG_HC),
6370 *(params->ramrod_params.pstate) = BNX2X_FP_STATE_OPENING;
6372 hc_usec = params->rxq_params.hc_rate ?
6373 1000000 / params->rxq_params.hc_rate : 0;
6374 bnx2x_update_coalesce_sb_index(bp,
6375 params->rxq_params.fw_sb_id,
6376 params->rxq_params.sb_cq_index,
6377 !(params->rxq_params.flags & QUEUE_FLG_HC),
6380 bnx2x_set_ctx_validation(params->rxq_params.cxt,
6381 params->rxq_params.cid);
6384 if (params->txq_params.flags & QUEUE_FLG_STATS)
6385 storm_memset_xstats_zero(bp, BP_PORT(bp),
6386 params->txq_params.stat_id);
6388 if (params->rxq_params.flags & QUEUE_FLG_STATS) {
6389 storm_memset_ustats_zero(bp, BP_PORT(bp),
6390 params->rxq_params.stat_id);
6391 storm_memset_tstats_zero(bp, BP_PORT(bp),
6392 params->rxq_params.stat_id);
6395 /* Fill the ramrod data */
6396 bnx2x_fill_cl_init_data(bp, params, activate, data);
6400 * bnx2x_sp_post() takes a spin_lock thus no other explict memory
6401 * barrier except from mmiowb() is needed to impose a
6402 * proper ordering of memory operations.
6407 bnx2x_sp_post(bp, ramrod, params->ramrod_params.cid,
6408 U64_HI(data_mapping), U64_LO(data_mapping), 0);
6410 /* Wait for completion */
6411 rc = bnx2x_wait_ramrod(bp, params->ramrod_params.state,
6412 params->ramrod_params.index,
6413 params->ramrod_params.pstate,
6419 * Configure interrupt mode according to current configuration.
6420 * In case of MSI-X it will also try to enable MSI-X.
6426 static int __devinit bnx2x_set_int_mode(struct bnx2x *bp)
6430 switch (bp->int_mode) {
6432 bnx2x_enable_msi(bp);
6433 /* falling through... */
6436 DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
6439 /* Set number of queues according to bp->multi_mode value */
6440 bnx2x_set_num_queues(bp);
6442 DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
6445 /* if we can't use MSI-X we only need one fp,
6446 * so try to enable MSI-X with the requested number of fp's
6447 * and fallback to MSI or legacy INTx with one fp
6449 rc = bnx2x_enable_msix(bp);
6451 /* failed to enable MSI-X */
6454 "Multi requested but failed to "
6455 "enable MSI-X (%d), "
6456 "set number of queues to %d\n",
6461 if (!(bp->flags & DISABLE_MSI_FLAG))
6462 bnx2x_enable_msi(bp);
6471 /* must be called prioir to any HW initializations */
6472 static inline u16 bnx2x_cid_ilt_lines(struct bnx2x *bp)
6474 return L2_ILT_LINES(bp);
6477 void bnx2x_ilt_set_info(struct bnx2x *bp)
6479 struct ilt_client_info *ilt_client;
6480 struct bnx2x_ilt *ilt = BP_ILT(bp);
6483 ilt->start_line = FUNC_ILT_BASE(BP_FUNC(bp));
6484 DP(BNX2X_MSG_SP, "ilt starts at line %d\n", ilt->start_line);
6487 ilt_client = &ilt->clients[ILT_CLIENT_CDU];
6488 ilt_client->client_num = ILT_CLIENT_CDU;
6489 ilt_client->page_size = CDU_ILT_PAGE_SZ;
6490 ilt_client->flags = ILT_CLIENT_SKIP_MEM;
6491 ilt_client->start = line;
6492 line += L2_ILT_LINES(bp);
6494 line += CNIC_ILT_LINES;
6496 ilt_client->end = line - 1;
6498 DP(BNX2X_MSG_SP, "ilt client[CDU]: start %d, end %d, psz 0x%x, "
6499 "flags 0x%x, hw psz %d\n",
6502 ilt_client->page_size,
6504 ilog2(ilt_client->page_size >> 12));
6507 if (QM_INIT(bp->qm_cid_count)) {
6508 ilt_client = &ilt->clients[ILT_CLIENT_QM];
6509 ilt_client->client_num = ILT_CLIENT_QM;
6510 ilt_client->page_size = QM_ILT_PAGE_SZ;
6511 ilt_client->flags = 0;
6512 ilt_client->start = line;
6514 /* 4 bytes for each cid */
6515 line += DIV_ROUND_UP(bp->qm_cid_count * QM_QUEUES_PER_FUNC * 4,
6518 ilt_client->end = line - 1;
6520 DP(BNX2X_MSG_SP, "ilt client[QM]: start %d, end %d, psz 0x%x, "
6521 "flags 0x%x, hw psz %d\n",
6524 ilt_client->page_size,
6526 ilog2(ilt_client->page_size >> 12));
6530 ilt_client = &ilt->clients[ILT_CLIENT_SRC];
6532 ilt_client->client_num = ILT_CLIENT_SRC;
6533 ilt_client->page_size = SRC_ILT_PAGE_SZ;
6534 ilt_client->flags = 0;
6535 ilt_client->start = line;
6536 line += SRC_ILT_LINES;
6537 ilt_client->end = line - 1;
6539 DP(BNX2X_MSG_SP, "ilt client[SRC]: start %d, end %d, psz 0x%x, "
6540 "flags 0x%x, hw psz %d\n",
6543 ilt_client->page_size,
6545 ilog2(ilt_client->page_size >> 12));
6548 ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM);
6552 ilt_client = &ilt->clients[ILT_CLIENT_TM];
6554 ilt_client->client_num = ILT_CLIENT_TM;
6555 ilt_client->page_size = TM_ILT_PAGE_SZ;
6556 ilt_client->flags = 0;
6557 ilt_client->start = line;
6558 line += TM_ILT_LINES;
6559 ilt_client->end = line - 1;
6561 DP(BNX2X_MSG_SP, "ilt client[TM]: start %d, end %d, psz 0x%x, "
6562 "flags 0x%x, hw psz %d\n",
6565 ilt_client->page_size,
6567 ilog2(ilt_client->page_size >> 12));
6570 ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM);
6574 int bnx2x_setup_client(struct bnx2x *bp, struct bnx2x_fastpath *fp,
6577 struct bnx2x_client_init_params params = { {0} };
6580 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0,
6583 params.ramrod_params.pstate = &fp->state;
6584 params.ramrod_params.state = BNX2X_FP_STATE_OPEN;
6585 params.ramrod_params.index = fp->index;
6586 params.ramrod_params.cid = fp->cid;
6589 params.ramrod_params.flags |= CLIENT_IS_LEADING_RSS;
6591 bnx2x_pf_rx_cl_prep(bp, fp, ¶ms.pause, ¶ms.rxq_params);
6593 bnx2x_pf_tx_cl_prep(bp, fp, ¶ms.txq_params);
6595 rc = bnx2x_setup_fw_client(bp, ¶ms, 1,
6596 bnx2x_sp(bp, client_init_data),
6597 bnx2x_sp_mapping(bp, client_init_data));
6601 int bnx2x_stop_fw_client(struct bnx2x *bp, struct bnx2x_client_ramrod_params *p)
6605 int poll_flag = p->poll ? WAIT_RAMROD_POLL : 0;
6607 /* halt the connection */
6608 *p->pstate = BNX2X_FP_STATE_HALTING;
6609 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, p->cid, 0,
6612 /* Wait for completion */
6613 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, p->index,
6614 p->pstate, poll_flag);
6615 if (rc) /* timeout */
6618 *p->pstate = BNX2X_FP_STATE_TERMINATING;
6619 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TERMINATE, p->cid, 0,
6621 /* Wait for completion */
6622 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_TERMINATED, p->index,
6623 p->pstate, poll_flag);
6624 if (rc) /* timeout */
6628 /* delete cfc entry */
6629 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_CFC_DEL, p->cid, 0, 0, 1);
6631 /* Wait for completion */
6632 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, p->index,
6633 p->pstate, WAIT_RAMROD_COMMON);
6637 static int bnx2x_stop_client(struct bnx2x *bp, int index)
6639 struct bnx2x_client_ramrod_params client_stop = {0};
6640 struct bnx2x_fastpath *fp = &bp->fp[index];
6642 client_stop.index = index;
6643 client_stop.cid = fp->cid;
6644 client_stop.cl_id = fp->cl_id;
6645 client_stop.pstate = &(fp->state);
6646 client_stop.poll = 0;
6648 return bnx2x_stop_fw_client(bp, &client_stop);
6652 static void bnx2x_reset_func(struct bnx2x *bp)
6654 int port = BP_PORT(bp);
6655 int func = BP_FUNC(bp);
6657 int pfunc_offset_fp = offsetof(struct hc_sb_data, p_func) +
6659 offsetof(struct hc_status_block_data_e2, common) :
6660 offsetof(struct hc_status_block_data_e1x, common));
6661 int pfunc_offset_sp = offsetof(struct hc_sp_status_block_data, p_func);
6662 int pfid_offset = offsetof(struct pci_entity, pf_id);
6664 /* Disable the function in the FW */
6665 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0);
6666 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0);
6667 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0);
6668 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0);
6671 for_each_queue(bp, i) {
6672 struct bnx2x_fastpath *fp = &bp->fp[i];
6674 BAR_CSTRORM_INTMEM +
6675 CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id)
6676 + pfunc_offset_fp + pfid_offset,
6677 HC_FUNCTION_DISABLED);
6682 BAR_CSTRORM_INTMEM +
6683 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
6684 pfunc_offset_sp + pfid_offset,
6685 HC_FUNCTION_DISABLED);
6688 for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++)
6689 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func),
6693 if (bp->common.int_block == INT_BLOCK_HC) {
6694 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6695 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6697 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
6698 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
6702 /* Disable Timer scan */
6703 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
6705 * Wait for at least 10ms and up to 2 second for the timers scan to
6708 for (i = 0; i < 200; i++) {
6710 if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
6715 bnx2x_clear_func_ilt(bp, func);
6717 /* Timers workaround bug for E2: if this is vnic-3,
6718 * we need to set the entire ilt range for this timers.
6720 if (CHIP_IS_E2(bp) && BP_VN(bp) == 3) {
6721 struct ilt_client_info ilt_cli;
6722 /* use dummy TM client */
6723 memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
6725 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
6726 ilt_cli.client_num = ILT_CLIENT_TM;
6728 bnx2x_ilt_boundry_init_op(bp, &ilt_cli, 0, INITOP_CLEAR);
6731 /* this assumes that reset_port() called before reset_func()*/
6733 bnx2x_pf_disable(bp);
6738 static void bnx2x_reset_port(struct bnx2x *bp)
6740 int port = BP_PORT(bp);
6743 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6745 /* Do not rcv packets to BRB */
6746 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
6747 /* Do not direct rcv packets that are not for MCP to the BRB */
6748 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
6749 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6752 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
6755 /* Check for BRB port occupancy */
6756 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
6758 DP(NETIF_MSG_IFDOWN,
6759 "BRB1 is not empty %d blocks are occupied\n", val);
6761 /* TODO: Close Doorbell port? */
6764 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
6766 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
6767 BP_ABS_FUNC(bp), reset_code);
6769 switch (reset_code) {
6770 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
6771 bnx2x_reset_port(bp);
6772 bnx2x_reset_func(bp);
6773 bnx2x_reset_common(bp);
6776 case FW_MSG_CODE_DRV_UNLOAD_PORT:
6777 bnx2x_reset_port(bp);
6778 bnx2x_reset_func(bp);
6781 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
6782 bnx2x_reset_func(bp);
6786 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
6791 void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
6793 int port = BP_PORT(bp);
6797 /* Wait until tx fastpath tasks complete */
6798 for_each_queue(bp, i) {
6799 struct bnx2x_fastpath *fp = &bp->fp[i];
6802 while (bnx2x_has_tx_work_unload(fp)) {
6805 BNX2X_ERR("timeout waiting for queue[%d]\n",
6807 #ifdef BNX2X_STOP_ON_ERROR
6818 /* Give HW time to discard old tx messages */
6821 if (CHIP_IS_E1(bp)) {
6822 /* invalidate mc list,
6823 * wait and poll (interrupts are off)
6825 bnx2x_invlidate_e1_mc_list(bp);
6826 bnx2x_set_eth_mac(bp, 0);
6829 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
6831 bnx2x_set_eth_mac(bp, 0);
6833 for (i = 0; i < MC_HASH_SIZE; i++)
6834 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
6838 /* Clear iSCSI L2 MAC */
6839 mutex_lock(&bp->cnic_mutex);
6840 if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
6841 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
6842 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
6844 mutex_unlock(&bp->cnic_mutex);
6847 if (unload_mode == UNLOAD_NORMAL)
6848 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6850 else if (bp->flags & NO_WOL_FLAG)
6851 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
6854 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
6855 u8 *mac_addr = bp->dev->dev_addr;
6857 /* The mac address is written to entries 1-4 to
6858 preserve entry 0 which is used by the PMF */
6859 u8 entry = (BP_E1HVN(bp) + 1)*8;
6861 val = (mac_addr[0] << 8) | mac_addr[1];
6862 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
6864 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
6865 (mac_addr[4] << 8) | mac_addr[5];
6866 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
6868 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
6871 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6873 /* Close multi and leading connections
6874 Completions for ramrods are collected in a synchronous way */
6875 for_each_queue(bp, i)
6877 if (bnx2x_stop_client(bp, i))
6878 #ifdef BNX2X_STOP_ON_ERROR
6884 rc = bnx2x_func_stop(bp);
6886 BNX2X_ERR("Function stop failed!\n");
6887 #ifdef BNX2X_STOP_ON_ERROR
6893 #ifndef BNX2X_STOP_ON_ERROR
6897 reset_code = bnx2x_fw_command(bp, reset_code, 0);
6899 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts[%d] "
6900 "%d, %d, %d\n", BP_PATH(bp),
6901 load_count[BP_PATH(bp)][0],
6902 load_count[BP_PATH(bp)][1],
6903 load_count[BP_PATH(bp)][2]);
6904 load_count[BP_PATH(bp)][0]--;
6905 load_count[BP_PATH(bp)][1 + port]--;
6906 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts[%d] "
6907 "%d, %d, %d\n", BP_PATH(bp),
6908 load_count[BP_PATH(bp)][0], load_count[BP_PATH(bp)][1],
6909 load_count[BP_PATH(bp)][2]);
6910 if (load_count[BP_PATH(bp)][0] == 0)
6911 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
6912 else if (load_count[BP_PATH(bp)][1 + port] == 0)
6913 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
6915 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
6918 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
6919 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
6920 bnx2x__link_reset(bp);
6922 /* Disable HW interrupts, NAPI */
6923 bnx2x_netif_stop(bp, 1);
6928 /* Reset the chip */
6929 bnx2x_reset_chip(bp, reset_code);
6931 /* Report UNLOAD_DONE to MCP */
6933 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
6937 void bnx2x_disable_close_the_gate(struct bnx2x *bp)
6941 DP(NETIF_MSG_HW, "Disabling \"close the gates\"\n");
6943 if (CHIP_IS_E1(bp)) {
6944 int port = BP_PORT(bp);
6945 u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
6946 MISC_REG_AEU_MASK_ATTN_FUNC_0;
6948 val = REG_RD(bp, addr);
6950 REG_WR(bp, addr, val);
6951 } else if (CHIP_IS_E1H(bp)) {
6952 val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
6953 val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
6954 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
6955 REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
6959 /* Close gates #2, #3 and #4: */
6960 static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
6964 /* Gates #2 and #4a are closed/opened for "not E1" only */
6965 if (!CHIP_IS_E1(bp)) {
6967 val = REG_RD(bp, PXP_REG_HST_DISCARD_DOORBELLS);
6968 REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS,
6969 close ? (val | 0x1) : (val & (~(u32)1)));
6971 val = REG_RD(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES);
6972 REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES,
6973 close ? (val | 0x1) : (val & (~(u32)1)));
6977 addr = BP_PORT(bp) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
6978 val = REG_RD(bp, addr);
6979 REG_WR(bp, addr, (!close) ? (val | 0x1) : (val & (~(u32)1)));
6981 DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n",
6982 close ? "closing" : "opening");
6986 #define SHARED_MF_CLP_MAGIC 0x80000000 /* `magic' bit */
6988 static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
6990 /* Do some magic... */
6991 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
6992 *magic_val = val & SHARED_MF_CLP_MAGIC;
6993 MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
6996 /* Restore the value of the `magic' bit.
6998 * @param pdev Device handle.
6999 * @param magic_val Old value of the `magic' bit.
7001 static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
7003 /* Restore the `magic' bit value... */
7004 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
7005 MF_CFG_WR(bp, shared_mf_config.clp_mb,
7006 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
7010 * Prepares for MCP reset: takes care of CLP configurations.
7013 * @param magic_val Old value of 'magic' bit.
7015 static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
7018 u32 validity_offset;
7020 DP(NETIF_MSG_HW, "Starting\n");
7022 /* Set `magic' bit in order to save MF config */
7023 if (!CHIP_IS_E1(bp))
7024 bnx2x_clp_reset_prep(bp, magic_val);
7026 /* Get shmem offset */
7027 shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7028 validity_offset = offsetof(struct shmem_region, validity_map[0]);
7030 /* Clear validity map flags */
7032 REG_WR(bp, shmem + validity_offset, 0);
7035 #define MCP_TIMEOUT 5000 /* 5 seconds (in ms) */
7036 #define MCP_ONE_TIMEOUT 100 /* 100 ms */
7038 /* Waits for MCP_ONE_TIMEOUT or MCP_ONE_TIMEOUT*10,
7039 * depending on the HW type.
7043 static inline void bnx2x_mcp_wait_one(struct bnx2x *bp)
7045 /* special handling for emulation and FPGA,
7046 wait 10 times longer */
7047 if (CHIP_REV_IS_SLOW(bp))
7048 msleep(MCP_ONE_TIMEOUT*10);
7050 msleep(MCP_ONE_TIMEOUT);
7053 static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
7055 u32 shmem, cnt, validity_offset, val;
7060 /* Get shmem offset */
7061 shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7063 BNX2X_ERR("Shmem 0 return failure\n");
7068 validity_offset = offsetof(struct shmem_region, validity_map[0]);
7070 /* Wait for MCP to come up */
7071 for (cnt = 0; cnt < (MCP_TIMEOUT / MCP_ONE_TIMEOUT); cnt++) {
7072 /* TBD: its best to check validity map of last port.
7073 * currently checks on port 0.
7075 val = REG_RD(bp, shmem + validity_offset);
7076 DP(NETIF_MSG_HW, "shmem 0x%x validity map(0x%x)=0x%x\n", shmem,
7077 shmem + validity_offset, val);
7079 /* check that shared memory is valid. */
7080 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7081 == (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7084 bnx2x_mcp_wait_one(bp);
7087 DP(NETIF_MSG_HW, "Cnt=%d Shmem validity map 0x%x\n", cnt, val);
7089 /* Check that shared memory is valid. This indicates that MCP is up. */
7090 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
7091 (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
7092 BNX2X_ERR("Shmem signature not present. MCP is not up !!\n");
7098 /* Restore the `magic' bit value */
7099 if (!CHIP_IS_E1(bp))
7100 bnx2x_clp_reset_done(bp, magic_val);
7105 static void bnx2x_pxp_prep(struct bnx2x *bp)
7107 if (!CHIP_IS_E1(bp)) {
7108 REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
7109 REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
7110 REG_WR(bp, PXP2_REG_RQ_CFG_DONE, 0);
7116 * Reset the whole chip except for:
7118 * - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
7121 * - MISC (including AEU)
7125 static void bnx2x_process_kill_chip_reset(struct bnx2x *bp)
7127 u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
7130 MISC_REGISTERS_RESET_REG_1_RST_HC |
7131 MISC_REGISTERS_RESET_REG_1_RST_PXPV |
7132 MISC_REGISTERS_RESET_REG_1_RST_PXP;
7135 MISC_REGISTERS_RESET_REG_2_RST_MDIO |
7136 MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
7137 MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
7138 MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
7139 MISC_REGISTERS_RESET_REG_2_RST_RBCN |
7140 MISC_REGISTERS_RESET_REG_2_RST_GRC |
7141 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
7142 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B;
7144 reset_mask1 = 0xffffffff;
7147 reset_mask2 = 0xffff;
7149 reset_mask2 = 0x1ffff;
7151 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
7152 reset_mask1 & (~not_reset_mask1));
7153 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
7154 reset_mask2 & (~not_reset_mask2));
7159 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
7160 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, reset_mask2);
7164 static int bnx2x_process_kill(struct bnx2x *bp)
7168 u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
7171 /* Empty the Tetris buffer, wait for 1s */
7173 sr_cnt = REG_RD(bp, PXP2_REG_RD_SR_CNT);
7174 blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
7175 port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
7176 port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
7177 pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
7178 if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
7179 ((port_is_idle_0 & 0x1) == 0x1) &&
7180 ((port_is_idle_1 & 0x1) == 0x1) &&
7181 (pgl_exp_rom2 == 0xffffffff))
7184 } while (cnt-- > 0);
7187 DP(NETIF_MSG_HW, "Tetris buffer didn't get empty or there"
7189 " outstanding read requests after 1s!\n");
7190 DP(NETIF_MSG_HW, "sr_cnt=0x%08x, blk_cnt=0x%08x,"
7191 " port_is_idle_0=0x%08x,"
7192 " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
7193 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
7200 /* Close gates #2, #3 and #4 */
7201 bnx2x_set_234_gates(bp, true);
7203 /* TBD: Indicate that "process kill" is in progress to MCP */
7205 /* Clear "unprepared" bit */
7206 REG_WR(bp, MISC_REG_UNPREPARED, 0);
7209 /* Make sure all is written to the chip before the reset */
7212 /* Wait for 1ms to empty GLUE and PCI-E core queues,
7213 * PSWHST, GRC and PSWRD Tetris buffer.
7217 /* Prepare to chip reset: */
7219 bnx2x_reset_mcp_prep(bp, &val);
7225 /* reset the chip */
7226 bnx2x_process_kill_chip_reset(bp);
7229 /* Recover after reset: */
7231 if (bnx2x_reset_mcp_comp(bp, val))
7237 /* Open the gates #2, #3 and #4 */
7238 bnx2x_set_234_gates(bp, false);
7240 /* TBD: IGU/AEU preparation bring back the AEU/IGU to a
7241 * reset state, re-enable attentions. */
7246 static int bnx2x_leader_reset(struct bnx2x *bp)
7249 /* Try to recover after the failure */
7250 if (bnx2x_process_kill(bp)) {
7251 printk(KERN_ERR "%s: Something bad had happen! Aii!\n",
7254 goto exit_leader_reset;
7257 /* Clear "reset is in progress" bit and update the driver state */
7258 bnx2x_set_reset_done(bp);
7259 bp->recovery_state = BNX2X_RECOVERY_DONE;
7263 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
7268 /* Assumption: runs under rtnl lock. This together with the fact
7269 * that it's called only from bnx2x_reset_task() ensure that it
7270 * will never be called when netif_running(bp->dev) is false.
7272 static void bnx2x_parity_recover(struct bnx2x *bp)
7274 DP(NETIF_MSG_HW, "Handling parity\n");
7276 switch (bp->recovery_state) {
7277 case BNX2X_RECOVERY_INIT:
7278 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
7279 /* Try to get a LEADER_LOCK HW lock */
7280 if (bnx2x_trylock_hw_lock(bp,
7281 HW_LOCK_RESOURCE_RESERVED_08))
7284 /* Stop the driver */
7285 /* If interface has been removed - break */
7286 if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY))
7289 bp->recovery_state = BNX2X_RECOVERY_WAIT;
7290 /* Ensure "is_leader" and "recovery_state"
7291 * update values are seen on other CPUs
7296 case BNX2X_RECOVERY_WAIT:
7297 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
7298 if (bp->is_leader) {
7299 u32 load_counter = bnx2x_get_load_cnt(bp);
7301 /* Wait until all other functions get
7304 schedule_delayed_work(&bp->reset_task,
7308 /* If all other functions got down -
7309 * try to bring the chip back to
7310 * normal. In any case it's an exit
7311 * point for a leader.
7313 if (bnx2x_leader_reset(bp) ||
7314 bnx2x_nic_load(bp, LOAD_NORMAL)) {
7315 printk(KERN_ERR"%s: Recovery "
7316 "has failed. Power cycle is "
7317 "needed.\n", bp->dev->name);
7318 /* Disconnect this device */
7319 netif_device_detach(bp->dev);
7320 /* Block ifup for all function
7321 * of this ASIC until
7322 * "process kill" or power
7325 bnx2x_set_reset_in_progress(bp);
7326 /* Shut down the power */
7327 bnx2x_set_power_state(bp,
7334 } else { /* non-leader */
7335 if (!bnx2x_reset_is_done(bp)) {
7336 /* Try to get a LEADER_LOCK HW lock as
7337 * long as a former leader may have
7338 * been unloaded by the user or
7339 * released a leadership by another
7342 if (bnx2x_trylock_hw_lock(bp,
7343 HW_LOCK_RESOURCE_RESERVED_08)) {
7344 /* I'm a leader now! Restart a
7351 schedule_delayed_work(&bp->reset_task,
7355 } else { /* A leader has completed
7356 * the "process kill". It's an exit
7357 * point for a non-leader.
7359 bnx2x_nic_load(bp, LOAD_NORMAL);
7360 bp->recovery_state =
7361 BNX2X_RECOVERY_DONE;
7372 /* bnx2x_nic_unload() flushes the bnx2x_wq, thus reset task is
7373 * scheduled on a general queue in order to prevent a dead lock.
7375 static void bnx2x_reset_task(struct work_struct *work)
7377 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task.work);
7379 #ifdef BNX2X_STOP_ON_ERROR
7380 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
7381 " so reset not done to allow debug dump,\n"
7382 KERN_ERR " you will need to reboot when done\n");
7388 if (!netif_running(bp->dev))
7389 goto reset_task_exit;
7391 if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE))
7392 bnx2x_parity_recover(bp);
7394 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
7395 bnx2x_nic_load(bp, LOAD_NORMAL);
7402 /* end of nic load/unload */
7405 * Init service functions
7408 u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
7410 u32 base = PXP2_REG_PGL_PRETEND_FUNC_F0;
7411 u32 stride = PXP2_REG_PGL_PRETEND_FUNC_F1 - base;
7412 return base + (BP_ABS_FUNC(bp)) * stride;
7415 static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp)
7417 u32 reg = bnx2x_get_pretend_reg(bp);
7419 /* Flush all outstanding writes */
7422 /* Pretend to be function 0 */
7424 REG_RD(bp, reg); /* Flush the GRC transaction (in the chip) */
7426 /* From now we are in the "like-E1" mode */
7427 bnx2x_int_disable(bp);
7429 /* Flush all outstanding writes */
7432 /* Restore the original function */
7433 REG_WR(bp, reg, BP_ABS_FUNC(bp));
7437 static inline void bnx2x_undi_int_disable(struct bnx2x *bp)
7440 bnx2x_int_disable(bp);
7442 bnx2x_undi_int_disable_e1h(bp);
7445 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
7449 /* Check if there is any driver already loaded */
7450 val = REG_RD(bp, MISC_REG_UNPREPARED);
7452 /* Check if it is the UNDI driver
7453 * UNDI driver initializes CID offset for normal bell to 0x7
7455 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7456 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
7458 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7459 /* save our pf_num */
7460 int orig_pf_num = bp->pf_num;
7464 /* clear the UNDI indication */
7465 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
7467 BNX2X_DEV_INFO("UNDI is active! reset device\n");
7469 /* try unload UNDI on port 0 */
7472 (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
7473 DRV_MSG_SEQ_NUMBER_MASK);
7474 reset_code = bnx2x_fw_command(bp, reset_code, 0);
7476 /* if UNDI is loaded on the other port */
7477 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
7479 /* send "DONE" for previous unload */
7480 bnx2x_fw_command(bp,
7481 DRV_MSG_CODE_UNLOAD_DONE, 0);
7483 /* unload UNDI on port 1 */
7486 (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
7487 DRV_MSG_SEQ_NUMBER_MASK);
7488 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7490 bnx2x_fw_command(bp, reset_code, 0);
7493 /* now it's safe to release the lock */
7494 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7496 bnx2x_undi_int_disable(bp);
7498 /* close input traffic and wait for it */
7499 /* Do not rcv packets to BRB */
7501 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
7502 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
7503 /* Do not direct rcv packets that are not for MCP to
7506 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
7507 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7510 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
7511 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
7514 /* save NIG port swap info */
7515 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
7516 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
7519 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
7522 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
7524 /* take the NIG out of reset and restore swap values */
7526 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
7527 MISC_REGISTERS_RESET_REG_1_RST_NIG);
7528 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
7529 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
7531 /* send unload done to the MCP */
7532 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
7534 /* restore our func and fw_seq */
7535 bp->pf_num = orig_pf_num;
7537 (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
7538 DRV_MSG_SEQ_NUMBER_MASK);
7540 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7544 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
7546 u32 val, val2, val3, val4, id;
7549 /* Get the chip revision id and number. */
7550 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
7551 val = REG_RD(bp, MISC_REG_CHIP_NUM);
7552 id = ((val & 0xffff) << 16);
7553 val = REG_RD(bp, MISC_REG_CHIP_REV);
7554 id |= ((val & 0xf) << 12);
7555 val = REG_RD(bp, MISC_REG_CHIP_METAL);
7556 id |= ((val & 0xff) << 4);
7557 val = REG_RD(bp, MISC_REG_BOND_ID);
7559 bp->common.chip_id = id;
7561 /* Set doorbell size */
7562 bp->db_size = (1 << BNX2X_DB_SHIFT);
7564 if (CHIP_IS_E2(bp)) {
7565 val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR);
7567 val = REG_RD(bp, MISC_REG_PORT4MODE_EN);
7569 val = (val >> 1) & 1;
7570 BNX2X_DEV_INFO("chip is in %s\n", val ? "4_PORT_MODE" :
7572 bp->common.chip_port_mode = val ? CHIP_4_PORT_MODE :
7575 if (CHIP_MODE_IS_4_PORT(bp))
7576 bp->pfid = (bp->pf_num >> 1); /* 0..3 */
7578 bp->pfid = (bp->pf_num & 0x6); /* 0, 2, 4, 6 */
7580 bp->common.chip_port_mode = CHIP_PORT_MODE_NONE; /* N/A */
7581 bp->pfid = bp->pf_num; /* 0..7 */
7585 * set base FW non-default (fast path) status block id, this value is
7586 * used to initialize the fw_sb_id saved on the fp/queue structure to
7587 * determine the id used by the FW.
7589 if (CHIP_IS_E1x(bp))
7590 bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E1x;
7592 bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E2;
7594 bp->link_params.chip_id = bp->common.chip_id;
7595 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
7597 val = (REG_RD(bp, 0x2874) & 0x55);
7598 if ((bp->common.chip_id & 0x1) ||
7599 (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
7600 bp->flags |= ONE_PORT_FLAG;
7601 BNX2X_DEV_INFO("single port device\n");
7604 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
7605 bp->common.flash_size = (NVRAM_1MB_SIZE <<
7606 (val & MCPR_NVM_CFG4_FLASH_SIZE));
7607 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
7608 bp->common.flash_size, bp->common.flash_size);
7610 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7611 bp->common.shmem2_base = REG_RD(bp, (BP_PATH(bp) ?
7612 MISC_REG_GENERIC_CR_1 :
7613 MISC_REG_GENERIC_CR_0));
7614 bp->link_params.shmem_base = bp->common.shmem_base;
7615 bp->link_params.shmem2_base = bp->common.shmem2_base;
7616 BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
7617 bp->common.shmem_base, bp->common.shmem2_base);
7619 if (!bp->common.shmem_base) {
7620 BNX2X_DEV_INFO("MCP not active\n");
7621 bp->flags |= NO_MCP_FLAG;
7625 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
7626 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7627 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7628 BNX2X_ERR("BAD MCP validity signature\n");
7630 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
7631 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
7633 bp->link_params.hw_led_mode = ((bp->common.hw_config &
7634 SHARED_HW_CFG_LED_MODE_MASK) >>
7635 SHARED_HW_CFG_LED_MODE_SHIFT);
7637 bp->link_params.feature_config_flags = 0;
7638 val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
7639 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
7640 bp->link_params.feature_config_flags |=
7641 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
7643 bp->link_params.feature_config_flags &=
7644 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
7646 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
7647 bp->common.bc_ver = val;
7648 BNX2X_DEV_INFO("bc_ver %X\n", val);
7649 if (val < BNX2X_BC_VER) {
7650 /* for now only warn
7651 * later we might need to enforce this */
7652 BNX2X_ERR("This driver needs bc_ver %X but found %X, "
7653 "please upgrade BC\n", BNX2X_BC_VER, val);
7655 bp->link_params.feature_config_flags |=
7656 (val >= REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL) ?
7657 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
7659 bp->link_params.feature_config_flags |=
7660 (val >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL) ?
7661 FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY : 0;
7663 if (BP_E1HVN(bp) == 0) {
7664 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
7665 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
7667 /* no WOL capability for E1HVN != 0 */
7668 bp->flags |= NO_WOL_FLAG;
7670 BNX2X_DEV_INFO("%sWoL capable\n",
7671 (bp->flags & NO_WOL_FLAG) ? "not " : "");
7673 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
7674 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
7675 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
7676 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
7678 dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
7679 val, val2, val3, val4);
7682 #define IGU_FID(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID)
7683 #define IGU_VEC(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)
7685 static void __devinit bnx2x_get_igu_cam_info(struct bnx2x *bp)
7687 int pfid = BP_FUNC(bp);
7688 int vn = BP_E1HVN(bp);
7693 bp->igu_base_sb = 0xff;
7695 if (CHIP_INT_MODE_IS_BC(bp)) {
7696 bp->igu_sb_cnt = min_t(u8, FP_SB_MAX_E1x,
7699 bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) *
7702 bp->igu_dsb_id = E1HVN_MAX * FP_SB_MAX_E1x +
7703 (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn);
7708 /* IGU in normal mode - read CAM */
7709 for (igu_sb_id = 0; igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE;
7711 val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4);
7712 if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
7715 if ((fid & IGU_FID_ENCODE_IS_PF)) {
7716 if ((fid & IGU_FID_PF_NUM_MASK) != pfid)
7718 if (IGU_VEC(val) == 0)
7719 /* default status block */
7720 bp->igu_dsb_id = igu_sb_id;
7722 if (bp->igu_base_sb == 0xff)
7723 bp->igu_base_sb = igu_sb_id;
7728 bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt, bp->l2_cid_count);
7729 if (bp->igu_sb_cnt == 0)
7730 BNX2X_ERR("CAM configuration error\n");
7733 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
7736 int cfg_size = 0, idx, port = BP_PORT(bp);
7738 /* Aggregation of supported attributes of all external phys */
7739 bp->port.supported[0] = 0;
7740 bp->port.supported[1] = 0;
7741 switch (bp->link_params.num_phys) {
7743 bp->port.supported[0] = bp->link_params.phy[INT_PHY].supported;
7747 bp->port.supported[0] = bp->link_params.phy[EXT_PHY1].supported;
7751 if (bp->link_params.multi_phy_config &
7752 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
7753 bp->port.supported[1] =
7754 bp->link_params.phy[EXT_PHY1].supported;
7755 bp->port.supported[0] =
7756 bp->link_params.phy[EXT_PHY2].supported;
7758 bp->port.supported[0] =
7759 bp->link_params.phy[EXT_PHY1].supported;
7760 bp->port.supported[1] =
7761 bp->link_params.phy[EXT_PHY2].supported;
7767 if (!(bp->port.supported[0] || bp->port.supported[1])) {
7768 BNX2X_ERR("NVRAM config error. BAD phy config."
7769 "PHY1 config 0x%x, PHY2 config 0x%x\n",
7771 dev_info.port_hw_config[port].external_phy_config),
7773 dev_info.port_hw_config[port].external_phy_config2));
7777 switch (switch_cfg) {
7779 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
7781 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7784 case SWITCH_CFG_10G:
7785 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
7787 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7791 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
7792 bp->port.link_config[0]);
7795 /* mask what we support according to speed_cap_mask per configuration */
7796 for (idx = 0; idx < cfg_size; idx++) {
7797 if (!(bp->link_params.speed_cap_mask[idx] &
7798 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
7799 bp->port.supported[idx] &= ~SUPPORTED_10baseT_Half;
7801 if (!(bp->link_params.speed_cap_mask[idx] &
7802 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
7803 bp->port.supported[idx] &= ~SUPPORTED_10baseT_Full;
7805 if (!(bp->link_params.speed_cap_mask[idx] &
7806 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
7807 bp->port.supported[idx] &= ~SUPPORTED_100baseT_Half;
7809 if (!(bp->link_params.speed_cap_mask[idx] &
7810 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
7811 bp->port.supported[idx] &= ~SUPPORTED_100baseT_Full;
7813 if (!(bp->link_params.speed_cap_mask[idx] &
7814 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
7815 bp->port.supported[idx] &= ~(SUPPORTED_1000baseT_Half |
7816 SUPPORTED_1000baseT_Full);
7818 if (!(bp->link_params.speed_cap_mask[idx] &
7819 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
7820 bp->port.supported[idx] &= ~SUPPORTED_2500baseX_Full;
7822 if (!(bp->link_params.speed_cap_mask[idx] &
7823 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
7824 bp->port.supported[idx] &= ~SUPPORTED_10000baseT_Full;
7828 BNX2X_DEV_INFO("supported 0x%x 0x%x\n", bp->port.supported[0],
7829 bp->port.supported[1]);
7832 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
7834 u32 link_config, idx, cfg_size = 0;
7835 bp->port.advertising[0] = 0;
7836 bp->port.advertising[1] = 0;
7837 switch (bp->link_params.num_phys) {
7846 for (idx = 0; idx < cfg_size; idx++) {
7847 bp->link_params.req_duplex[idx] = DUPLEX_FULL;
7848 link_config = bp->port.link_config[idx];
7849 switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
7850 case PORT_FEATURE_LINK_SPEED_AUTO:
7851 if (bp->port.supported[idx] & SUPPORTED_Autoneg) {
7852 bp->link_params.req_line_speed[idx] =
7854 bp->port.advertising[idx] |=
7855 bp->port.supported[idx];
7857 /* force 10G, no AN */
7858 bp->link_params.req_line_speed[idx] =
7860 bp->port.advertising[idx] |=
7861 (ADVERTISED_10000baseT_Full |
7867 case PORT_FEATURE_LINK_SPEED_10M_FULL:
7868 if (bp->port.supported[idx] & SUPPORTED_10baseT_Full) {
7869 bp->link_params.req_line_speed[idx] =
7871 bp->port.advertising[idx] |=
7872 (ADVERTISED_10baseT_Full |
7875 BNX2X_ERROR("NVRAM config error. "
7876 "Invalid link_config 0x%x"
7877 " speed_cap_mask 0x%x\n",
7879 bp->link_params.speed_cap_mask[idx]);
7884 case PORT_FEATURE_LINK_SPEED_10M_HALF:
7885 if (bp->port.supported[idx] & SUPPORTED_10baseT_Half) {
7886 bp->link_params.req_line_speed[idx] =
7888 bp->link_params.req_duplex[idx] =
7890 bp->port.advertising[idx] |=
7891 (ADVERTISED_10baseT_Half |
7894 BNX2X_ERROR("NVRAM config error. "
7895 "Invalid link_config 0x%x"
7896 " speed_cap_mask 0x%x\n",
7898 bp->link_params.speed_cap_mask[idx]);
7903 case PORT_FEATURE_LINK_SPEED_100M_FULL:
7904 if (bp->port.supported[idx] &
7905 SUPPORTED_100baseT_Full) {
7906 bp->link_params.req_line_speed[idx] =
7908 bp->port.advertising[idx] |=
7909 (ADVERTISED_100baseT_Full |
7912 BNX2X_ERROR("NVRAM config error. "
7913 "Invalid link_config 0x%x"
7914 " speed_cap_mask 0x%x\n",
7916 bp->link_params.speed_cap_mask[idx]);
7921 case PORT_FEATURE_LINK_SPEED_100M_HALF:
7922 if (bp->port.supported[idx] &
7923 SUPPORTED_100baseT_Half) {
7924 bp->link_params.req_line_speed[idx] =
7926 bp->link_params.req_duplex[idx] =
7928 bp->port.advertising[idx] |=
7929 (ADVERTISED_100baseT_Half |
7932 BNX2X_ERROR("NVRAM config error. "
7933 "Invalid link_config 0x%x"
7934 " speed_cap_mask 0x%x\n",
7936 bp->link_params.speed_cap_mask[idx]);
7941 case PORT_FEATURE_LINK_SPEED_1G:
7942 if (bp->port.supported[idx] &
7943 SUPPORTED_1000baseT_Full) {
7944 bp->link_params.req_line_speed[idx] =
7946 bp->port.advertising[idx] |=
7947 (ADVERTISED_1000baseT_Full |
7950 BNX2X_ERROR("NVRAM config error. "
7951 "Invalid link_config 0x%x"
7952 " speed_cap_mask 0x%x\n",
7954 bp->link_params.speed_cap_mask[idx]);
7959 case PORT_FEATURE_LINK_SPEED_2_5G:
7960 if (bp->port.supported[idx] &
7961 SUPPORTED_2500baseX_Full) {
7962 bp->link_params.req_line_speed[idx] =
7964 bp->port.advertising[idx] |=
7965 (ADVERTISED_2500baseX_Full |
7968 BNX2X_ERROR("NVRAM config error. "
7969 "Invalid link_config 0x%x"
7970 " speed_cap_mask 0x%x\n",
7972 bp->link_params.speed_cap_mask[idx]);
7977 case PORT_FEATURE_LINK_SPEED_10G_CX4:
7978 case PORT_FEATURE_LINK_SPEED_10G_KX4:
7979 case PORT_FEATURE_LINK_SPEED_10G_KR:
7980 if (bp->port.supported[idx] &
7981 SUPPORTED_10000baseT_Full) {
7982 bp->link_params.req_line_speed[idx] =
7984 bp->port.advertising[idx] |=
7985 (ADVERTISED_10000baseT_Full |
7988 BNX2X_ERROR("NVRAM config error. "
7989 "Invalid link_config 0x%x"
7990 " speed_cap_mask 0x%x\n",
7992 bp->link_params.speed_cap_mask[idx]);
7998 BNX2X_ERROR("NVRAM config error. "
7999 "BAD link speed link_config 0x%x\n",
8001 bp->link_params.req_line_speed[idx] =
8003 bp->port.advertising[idx] =
8004 bp->port.supported[idx];
8008 bp->link_params.req_flow_ctrl[idx] = (link_config &
8009 PORT_FEATURE_FLOW_CONTROL_MASK);
8010 if ((bp->link_params.req_flow_ctrl[idx] ==
8011 BNX2X_FLOW_CTRL_AUTO) &&
8012 !(bp->port.supported[idx] & SUPPORTED_Autoneg)) {
8013 bp->link_params.req_flow_ctrl[idx] =
8014 BNX2X_FLOW_CTRL_NONE;
8017 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl"
8018 " 0x%x advertising 0x%x\n",
8019 bp->link_params.req_line_speed[idx],
8020 bp->link_params.req_duplex[idx],
8021 bp->link_params.req_flow_ctrl[idx],
8022 bp->port.advertising[idx]);
8026 static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
8028 mac_hi = cpu_to_be16(mac_hi);
8029 mac_lo = cpu_to_be32(mac_lo);
8030 memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
8031 memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
8034 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
8036 int port = BP_PORT(bp);
8039 u32 ext_phy_type, ext_phy_config;;
8041 bp->link_params.bp = bp;
8042 bp->link_params.port = port;
8044 bp->link_params.lane_config =
8045 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
8047 bp->link_params.speed_cap_mask[0] =
8049 dev_info.port_hw_config[port].speed_capability_mask);
8050 bp->link_params.speed_cap_mask[1] =
8052 dev_info.port_hw_config[port].speed_capability_mask2);
8053 bp->port.link_config[0] =
8054 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
8056 bp->port.link_config[1] =
8057 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config2);
8059 bp->link_params.multi_phy_config =
8060 SHMEM_RD(bp, dev_info.port_hw_config[port].multi_phy_config);
8061 /* If the device is capable of WoL, set the default state according
8064 config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
8065 bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
8066 (config & PORT_FEATURE_WOL_ENABLED));
8068 BNX2X_DEV_INFO("lane_config 0x%08x "
8069 "speed_cap_mask0 0x%08x link_config0 0x%08x\n",
8070 bp->link_params.lane_config,
8071 bp->link_params.speed_cap_mask[0],
8072 bp->port.link_config[0]);
8074 bp->link_params.switch_cfg = (bp->port.link_config[0] &
8075 PORT_FEATURE_CONNECTED_SWITCH_MASK);
8076 bnx2x_phy_probe(&bp->link_params);
8077 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
8079 bnx2x_link_settings_requested(bp);
8082 * If connected directly, work with the internal PHY, otherwise, work
8083 * with the external PHY
8087 dev_info.port_hw_config[port].external_phy_config);
8088 ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
8089 if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
8090 bp->mdio.prtad = bp->port.phy_addr;
8092 else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
8093 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
8095 XGXS_EXT_PHY_ADDR(ext_phy_config);
8097 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
8098 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
8099 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
8100 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
8101 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8104 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper);
8105 val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower);
8106 bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
8110 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8112 int func = BP_ABS_FUNC(bp);
8117 bnx2x_get_common_hwinfo(bp);
8119 if (CHIP_IS_E1x(bp)) {
8120 bp->common.int_block = INT_BLOCK_HC;
8122 bp->igu_dsb_id = DEF_SB_IGU_ID;
8123 bp->igu_base_sb = 0;
8124 bp->igu_sb_cnt = min_t(u8, FP_SB_MAX_E1x, bp->l2_cid_count);
8126 bp->common.int_block = INT_BLOCK_IGU;
8127 val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
8128 if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
8129 DP(NETIF_MSG_PROBE, "IGU Backward Compatible Mode\n");
8130 bp->common.int_block |= INT_BLOCK_MODE_BW_COMP;
8132 DP(NETIF_MSG_PROBE, "IGU Normal Mode\n");
8134 bnx2x_get_igu_cam_info(bp);
8137 DP(NETIF_MSG_PROBE, "igu_dsb_id %d igu_base_sb %d igu_sb_cnt %d\n",
8138 bp->igu_dsb_id, bp->igu_base_sb, bp->igu_sb_cnt);
8141 * Initialize MF configuration
8147 if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) {
8148 if (SHMEM2_HAS(bp, mf_cfg_addr))
8149 bp->common.mf_cfg_base = SHMEM2_RD(bp, mf_cfg_addr);
8151 bp->common.mf_cfg_base = bp->common.shmem_base +
8152 offsetof(struct shmem_region, func_mb) +
8153 E1H_FUNC_MAX * sizeof(struct drv_func_mb);
8155 MF_CFG_RD(bp, func_mf_config[func].config);
8157 val = (MF_CFG_RD(bp, func_mf_config[FUNC_0].e1hov_tag) &
8158 FUNC_MF_CFG_E1HOV_TAG_MASK);
8159 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
8161 BNX2X_DEV_INFO("%s function mode\n",
8162 IS_MF(bp) ? "multi" : "single");
8165 val = (MF_CFG_RD(bp, func_mf_config[func].
8167 FUNC_MF_CFG_E1HOV_TAG_MASK);
8168 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
8170 BNX2X_DEV_INFO("MF OV for func %d is %d "
8172 func, bp->mf_ov, bp->mf_ov);
8174 BNX2X_ERROR("No valid MF OV for func %d,"
8175 " aborting\n", func);
8180 BNX2X_ERROR("VN %d in single function mode,"
8181 " aborting\n", BP_E1HVN(bp));
8187 /* adjust igu_sb_cnt to MF for E1x */
8188 if (CHIP_IS_E1x(bp) && IS_MF(bp))
8189 bp->igu_sb_cnt /= E1HVN_MAX;
8192 * adjust E2 sb count: to be removed when FW will support
8193 * more then 16 L2 clients
8195 #define MAX_L2_CLIENTS 16
8197 bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt,
8198 MAX_L2_CLIENTS / (IS_MF(bp) ? 4 : 1));
8200 if (!BP_NOMCP(bp)) {
8201 bnx2x_get_port_hwinfo(bp);
8204 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
8205 DRV_MSG_SEQ_NUMBER_MASK);
8206 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
8210 val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
8211 val = MF_CFG_RD(bp, func_mf_config[func].mac_lower);
8212 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
8213 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
8214 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8215 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8216 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8217 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8218 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
8219 bp->dev->dev_addr[5] = (u8)(val & 0xff);
8220 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
8222 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
8230 /* only supposed to happen on emulation/FPGA */
8231 BNX2X_ERROR("warning: random MAC workaround active\n");
8232 random_ether_addr(bp->dev->dev_addr);
8233 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8239 static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
8241 int cnt, i, block_end, rodi;
8242 char vpd_data[BNX2X_VPD_LEN+1];
8243 char str_id_reg[VENDOR_ID_LEN+1];
8244 char str_id_cap[VENDOR_ID_LEN+1];
8247 cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data);
8248 memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
8250 if (cnt < BNX2X_VPD_LEN)
8253 i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN,
8254 PCI_VPD_LRDT_RO_DATA);
8259 block_end = i + PCI_VPD_LRDT_TAG_SIZE +
8260 pci_vpd_lrdt_size(&vpd_data[i]);
8262 i += PCI_VPD_LRDT_TAG_SIZE;
8264 if (block_end > BNX2X_VPD_LEN)
8267 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
8268 PCI_VPD_RO_KEYWORD_MFR_ID);
8272 len = pci_vpd_info_field_size(&vpd_data[rodi]);
8274 if (len != VENDOR_ID_LEN)
8277 rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
8279 /* vendor specific info */
8280 snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
8281 snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
8282 if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
8283 !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {
8285 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
8286 PCI_VPD_RO_KEYWORD_VENDOR0);
8288 len = pci_vpd_info_field_size(&vpd_data[rodi]);
8290 rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
8292 if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
8293 memcpy(bp->fw_ver, &vpd_data[rodi], len);
8294 bp->fw_ver[len] = ' ';
8303 static int __devinit bnx2x_init_bp(struct bnx2x *bp)
8309 /* Disable interrupt handling until HW is initialized */
8310 atomic_set(&bp->intr_sem, 1);
8311 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
8313 mutex_init(&bp->port.phy_mutex);
8314 mutex_init(&bp->fw_mb_mutex);
8315 spin_lock_init(&bp->stats_lock);
8317 mutex_init(&bp->cnic_mutex);
8320 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
8321 INIT_DELAYED_WORK(&bp->reset_task, bnx2x_reset_task);
8323 rc = bnx2x_get_hwinfo(bp);
8326 rc = bnx2x_alloc_mem_bp(bp);
8328 bnx2x_read_fwinfo(bp);
8332 /* need to reset chip if undi was active */
8334 bnx2x_undi_unload(bp);
8336 if (CHIP_REV_IS_FPGA(bp))
8337 dev_err(&bp->pdev->dev, "FPGA detected\n");
8339 if (BP_NOMCP(bp) && (func == 0))
8340 dev_err(&bp->pdev->dev, "MCP disabled, "
8341 "must load devices in order!\n");
8343 /* Set multi queue mode */
8344 if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
8345 ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
8346 dev_err(&bp->pdev->dev, "Multi disabled since int_mode "
8347 "requested is not MSI-X\n");
8348 multi_mode = ETH_RSS_MODE_DISABLED;
8350 bp->multi_mode = multi_mode;
8351 bp->int_mode = int_mode;
8353 bp->dev->features |= NETIF_F_GRO;
8357 bp->flags &= ~TPA_ENABLE_FLAG;
8358 bp->dev->features &= ~NETIF_F_LRO;
8360 bp->flags |= TPA_ENABLE_FLAG;
8361 bp->dev->features |= NETIF_F_LRO;
8363 bp->disable_tpa = disable_tpa;
8366 bp->dropless_fc = 0;
8368 bp->dropless_fc = dropless_fc;
8372 bp->tx_ring_size = MAX_TX_AVAIL;
8376 /* make sure that the numbers are in the right granularity */
8377 bp->tx_ticks = (50 / BNX2X_BTR) * BNX2X_BTR;
8378 bp->rx_ticks = (25 / BNX2X_BTR) * BNX2X_BTR;
8380 timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
8381 bp->current_interval = (poll ? poll : timer_interval);
8383 init_timer(&bp->timer);
8384 bp->timer.expires = jiffies + bp->current_interval;
8385 bp->timer.data = (unsigned long) bp;
8386 bp->timer.function = bnx2x_timer;
8392 /****************************************************************************
8393 * General service functions
8394 ****************************************************************************/
8396 /* called with rtnl_lock */
8397 static int bnx2x_open(struct net_device *dev)
8399 struct bnx2x *bp = netdev_priv(dev);
8401 netif_carrier_off(dev);
8403 bnx2x_set_power_state(bp, PCI_D0);
8405 if (!bnx2x_reset_is_done(bp)) {
8407 /* Reset MCP mail box sequence if there is on going
8412 /* If it's the first function to load and reset done
8413 * is still not cleared it may mean that. We don't
8414 * check the attention state here because it may have
8415 * already been cleared by a "common" reset but we
8416 * shell proceed with "process kill" anyway.
8418 if ((bnx2x_get_load_cnt(bp) == 0) &&
8419 bnx2x_trylock_hw_lock(bp,
8420 HW_LOCK_RESOURCE_RESERVED_08) &&
8421 (!bnx2x_leader_reset(bp))) {
8422 DP(NETIF_MSG_HW, "Recovered in open\n");
8426 bnx2x_set_power_state(bp, PCI_D3hot);
8428 printk(KERN_ERR"%s: Recovery flow hasn't been properly"
8429 " completed yet. Try again later. If u still see this"
8430 " message after a few retries then power cycle is"
8431 " required.\n", bp->dev->name);
8437 bp->recovery_state = BNX2X_RECOVERY_DONE;
8439 return bnx2x_nic_load(bp, LOAD_OPEN);
8442 /* called with rtnl_lock */
8443 static int bnx2x_close(struct net_device *dev)
8445 struct bnx2x *bp = netdev_priv(dev);
8447 /* Unload the driver, release IRQs */
8448 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
8449 bnx2x_set_power_state(bp, PCI_D3hot);
8454 /* called with netif_tx_lock from dev_mcast.c */
8455 void bnx2x_set_rx_mode(struct net_device *dev)
8457 struct bnx2x *bp = netdev_priv(dev);
8458 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
8459 int port = BP_PORT(bp);
8461 if (bp->state != BNX2X_STATE_OPEN) {
8462 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
8466 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
8468 if (dev->flags & IFF_PROMISC)
8469 rx_mode = BNX2X_RX_MODE_PROMISC;
8470 else if ((dev->flags & IFF_ALLMULTI) ||
8471 ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
8473 rx_mode = BNX2X_RX_MODE_ALLMULTI;
8474 else { /* some multicasts */
8475 if (CHIP_IS_E1(bp)) {
8477 * set mc list, do not wait as wait implies sleep
8478 * and set_rx_mode can be invoked from non-sleepable
8481 u8 offset = (CHIP_REV_IS_SLOW(bp) ?
8482 BNX2X_MAX_EMUL_MULTI*(1 + port) :
8483 BNX2X_MAX_MULTICAST*(1 + port));
8485 bnx2x_set_e1_mc_list(bp, offset);
8487 /* Accept one or more multicasts */
8488 struct netdev_hw_addr *ha;
8489 u32 mc_filter[MC_HASH_SIZE];
8490 u32 crc, bit, regidx;
8493 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
8495 netdev_for_each_mc_addr(ha, dev) {
8496 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
8499 crc = crc32c_le(0, bnx2x_mc_addr(ha),
8501 bit = (crc >> 24) & 0xff;
8504 mc_filter[regidx] |= (1 << bit);
8507 for (i = 0; i < MC_HASH_SIZE; i++)
8508 REG_WR(bp, MC_HASH_OFFSET(bp, i),
8513 bp->rx_mode = rx_mode;
8514 bnx2x_set_storm_rx_mode(bp);
8517 /* called with rtnl_lock */
8518 static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
8519 int devad, u16 addr)
8521 struct bnx2x *bp = netdev_priv(netdev);
8525 DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
8526 prtad, devad, addr);
8528 /* The HW expects different devad if CL22 is used */
8529 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
8531 bnx2x_acquire_phy_lock(bp);
8532 rc = bnx2x_phy_read(&bp->link_params, prtad, devad, addr, &value);
8533 bnx2x_release_phy_lock(bp);
8534 DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
8541 /* called with rtnl_lock */
8542 static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
8543 u16 addr, u16 value)
8545 struct bnx2x *bp = netdev_priv(netdev);
8548 DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
8549 " value 0x%x\n", prtad, devad, addr, value);
8551 /* The HW expects different devad if CL22 is used */
8552 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
8554 bnx2x_acquire_phy_lock(bp);
8555 rc = bnx2x_phy_write(&bp->link_params, prtad, devad, addr, value);
8556 bnx2x_release_phy_lock(bp);
8560 /* called with rtnl_lock */
8561 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
8563 struct bnx2x *bp = netdev_priv(dev);
8564 struct mii_ioctl_data *mdio = if_mii(ifr);
8566 DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
8567 mdio->phy_id, mdio->reg_num, mdio->val_in);
8569 if (!netif_running(dev))
8572 return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
8575 #ifdef CONFIG_NET_POLL_CONTROLLER
8576 static void poll_bnx2x(struct net_device *dev)
8578 struct bnx2x *bp = netdev_priv(dev);
8580 disable_irq(bp->pdev->irq);
8581 bnx2x_interrupt(bp->pdev->irq, dev);
8582 enable_irq(bp->pdev->irq);
8586 static const struct net_device_ops bnx2x_netdev_ops = {
8587 .ndo_open = bnx2x_open,
8588 .ndo_stop = bnx2x_close,
8589 .ndo_start_xmit = bnx2x_start_xmit,
8590 .ndo_set_multicast_list = bnx2x_set_rx_mode,
8591 .ndo_set_mac_address = bnx2x_change_mac_addr,
8592 .ndo_validate_addr = eth_validate_addr,
8593 .ndo_do_ioctl = bnx2x_ioctl,
8594 .ndo_change_mtu = bnx2x_change_mtu,
8595 .ndo_tx_timeout = bnx2x_tx_timeout,
8597 .ndo_vlan_rx_register = bnx2x_vlan_rx_register,
8599 #ifdef CONFIG_NET_POLL_CONTROLLER
8600 .ndo_poll_controller = poll_bnx2x,
8604 static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
8605 struct net_device *dev)
8610 SET_NETDEV_DEV(dev, &pdev->dev);
8611 bp = netdev_priv(dev);
8616 bp->pf_num = PCI_FUNC(pdev->devfn);
8618 rc = pci_enable_device(pdev);
8620 dev_err(&bp->pdev->dev,
8621 "Cannot enable PCI device, aborting\n");
8625 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
8626 dev_err(&bp->pdev->dev,
8627 "Cannot find PCI device base address, aborting\n");
8629 goto err_out_disable;
8632 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
8633 dev_err(&bp->pdev->dev, "Cannot find second PCI device"
8634 " base address, aborting\n");
8636 goto err_out_disable;
8639 if (atomic_read(&pdev->enable_cnt) == 1) {
8640 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
8642 dev_err(&bp->pdev->dev,
8643 "Cannot obtain PCI resources, aborting\n");
8644 goto err_out_disable;
8647 pci_set_master(pdev);
8648 pci_save_state(pdev);
8651 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
8652 if (bp->pm_cap == 0) {
8653 dev_err(&bp->pdev->dev,
8654 "Cannot find power management capability, aborting\n");
8656 goto err_out_release;
8659 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
8660 if (bp->pcie_cap == 0) {
8661 dev_err(&bp->pdev->dev,
8662 "Cannot find PCI Express capability, aborting\n");
8664 goto err_out_release;
8667 if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) == 0) {
8668 bp->flags |= USING_DAC_FLAG;
8669 if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)) != 0) {
8670 dev_err(&bp->pdev->dev, "dma_set_coherent_mask"
8671 " failed, aborting\n");
8673 goto err_out_release;
8676 } else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
8677 dev_err(&bp->pdev->dev,
8678 "System does not support DMA, aborting\n");
8680 goto err_out_release;
8683 dev->mem_start = pci_resource_start(pdev, 0);
8684 dev->base_addr = dev->mem_start;
8685 dev->mem_end = pci_resource_end(pdev, 0);
8687 dev->irq = pdev->irq;
8689 bp->regview = pci_ioremap_bar(pdev, 0);
8691 dev_err(&bp->pdev->dev,
8692 "Cannot map register space, aborting\n");
8694 goto err_out_release;
8697 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
8698 min_t(u64, BNX2X_DB_SIZE(bp),
8699 pci_resource_len(pdev, 2)));
8700 if (!bp->doorbells) {
8701 dev_err(&bp->pdev->dev,
8702 "Cannot map doorbell space, aborting\n");
8707 bnx2x_set_power_state(bp, PCI_D0);
8709 /* clean indirect addresses */
8710 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
8711 PCICFG_VENDOR_ID_OFFSET);
8712 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
8713 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
8714 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
8715 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
8717 /* Reset the load counter */
8718 bnx2x_clear_load_cnt(bp);
8720 dev->watchdog_timeo = TX_TIMEOUT;
8722 dev->netdev_ops = &bnx2x_netdev_ops;
8723 bnx2x_set_ethtool_ops(dev);
8724 dev->features |= NETIF_F_SG;
8725 dev->features |= NETIF_F_HW_CSUM;
8726 if (bp->flags & USING_DAC_FLAG)
8727 dev->features |= NETIF_F_HIGHDMA;
8728 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
8729 dev->features |= NETIF_F_TSO6;
8731 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
8732 bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
8734 dev->vlan_features |= NETIF_F_SG;
8735 dev->vlan_features |= NETIF_F_HW_CSUM;
8736 if (bp->flags & USING_DAC_FLAG)
8737 dev->vlan_features |= NETIF_F_HIGHDMA;
8738 dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
8739 dev->vlan_features |= NETIF_F_TSO6;
8742 /* get_port_hwinfo() will set prtad and mmds properly */
8743 bp->mdio.prtad = MDIO_PRTAD_NONE;
8745 bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
8747 bp->mdio.mdio_read = bnx2x_mdio_read;
8748 bp->mdio.mdio_write = bnx2x_mdio_write;
8754 iounmap(bp->regview);
8757 if (bp->doorbells) {
8758 iounmap(bp->doorbells);
8759 bp->doorbells = NULL;
8763 if (atomic_read(&pdev->enable_cnt) == 1)
8764 pci_release_regions(pdev);
8767 pci_disable_device(pdev);
8768 pci_set_drvdata(pdev, NULL);
8774 static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
8775 int *width, int *speed)
8777 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
8779 *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
8781 /* return value of 1=2.5GHz 2=5GHz */
8782 *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
8785 static int bnx2x_check_firmware(struct bnx2x *bp)
8787 const struct firmware *firmware = bp->firmware;
8788 struct bnx2x_fw_file_hdr *fw_hdr;
8789 struct bnx2x_fw_file_section *sections;
8790 u32 offset, len, num_ops;
8795 if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
8798 fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
8799 sections = (struct bnx2x_fw_file_section *)fw_hdr;
8801 /* Make sure none of the offsets and sizes make us read beyond
8802 * the end of the firmware data */
8803 for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
8804 offset = be32_to_cpu(sections[i].offset);
8805 len = be32_to_cpu(sections[i].len);
8806 if (offset + len > firmware->size) {
8807 dev_err(&bp->pdev->dev,
8808 "Section %d length is out of bounds\n", i);
8813 /* Likewise for the init_ops offsets */
8814 offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
8815 ops_offsets = (u16 *)(firmware->data + offset);
8816 num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
8818 for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
8819 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
8820 dev_err(&bp->pdev->dev,
8821 "Section offset %d is out of bounds\n", i);
8826 /* Check FW version */
8827 offset = be32_to_cpu(fw_hdr->fw_version.offset);
8828 fw_ver = firmware->data + offset;
8829 if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
8830 (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
8831 (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
8832 (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
8833 dev_err(&bp->pdev->dev,
8834 "Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
8835 fw_ver[0], fw_ver[1], fw_ver[2],
8836 fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
8837 BCM_5710_FW_MINOR_VERSION,
8838 BCM_5710_FW_REVISION_VERSION,
8839 BCM_5710_FW_ENGINEERING_VERSION);
8846 static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
8848 const __be32 *source = (const __be32 *)_source;
8849 u32 *target = (u32 *)_target;
8852 for (i = 0; i < n/4; i++)
8853 target[i] = be32_to_cpu(source[i]);
8857 Ops array is stored in the following format:
8858 {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
8860 static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
8862 const __be32 *source = (const __be32 *)_source;
8863 struct raw_op *target = (struct raw_op *)_target;
8866 for (i = 0, j = 0; i < n/8; i++, j += 2) {
8867 tmp = be32_to_cpu(source[j]);
8868 target[i].op = (tmp >> 24) & 0xff;
8869 target[i].offset = tmp & 0xffffff;
8870 target[i].raw_data = be32_to_cpu(source[j + 1]);
8875 * IRO array is stored in the following format:
8876 * {base(24bit), m1(16bit), m2(16bit), m3(16bit), size(16bit) }
8878 static inline void bnx2x_prep_iro(const u8 *_source, u8 *_target, u32 n)
8880 const __be32 *source = (const __be32 *)_source;
8881 struct iro *target = (struct iro *)_target;
8884 for (i = 0, j = 0; i < n/sizeof(struct iro); i++) {
8885 target[i].base = be32_to_cpu(source[j]);
8887 tmp = be32_to_cpu(source[j]);
8888 target[i].m1 = (tmp >> 16) & 0xffff;
8889 target[i].m2 = tmp & 0xffff;
8891 tmp = be32_to_cpu(source[j]);
8892 target[i].m3 = (tmp >> 16) & 0xffff;
8893 target[i].size = tmp & 0xffff;
8898 static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
8900 const __be16 *source = (const __be16 *)_source;
8901 u16 *target = (u16 *)_target;
8904 for (i = 0; i < n/2; i++)
8905 target[i] = be16_to_cpu(source[i]);
8908 #define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
8910 u32 len = be32_to_cpu(fw_hdr->arr.len); \
8911 bp->arr = kmalloc(len, GFP_KERNEL); \
8913 pr_err("Failed to allocate %d bytes for "#arr"\n", len); \
8916 func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
8917 (u8 *)bp->arr, len); \
8920 int bnx2x_init_firmware(struct bnx2x *bp)
8922 const char *fw_file_name;
8923 struct bnx2x_fw_file_hdr *fw_hdr;
8927 fw_file_name = FW_FILE_NAME_E1;
8928 else if (CHIP_IS_E1H(bp))
8929 fw_file_name = FW_FILE_NAME_E1H;
8930 else if (CHIP_IS_E2(bp))
8931 fw_file_name = FW_FILE_NAME_E2;
8933 BNX2X_ERR("Unsupported chip revision\n");
8937 BNX2X_DEV_INFO("Loading %s\n", fw_file_name);
8939 rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev);
8941 BNX2X_ERR("Can't load firmware file %s\n", fw_file_name);
8942 goto request_firmware_exit;
8945 rc = bnx2x_check_firmware(bp);
8947 BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name);
8948 goto request_firmware_exit;
8951 fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
8953 /* Initialize the pointers to the init arrays */
8955 BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
8958 BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
8961 BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
8964 /* STORMs firmware */
8965 INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
8966 be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
8967 INIT_TSEM_PRAM_DATA(bp) = bp->firmware->data +
8968 be32_to_cpu(fw_hdr->tsem_pram_data.offset);
8969 INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
8970 be32_to_cpu(fw_hdr->usem_int_table_data.offset);
8971 INIT_USEM_PRAM_DATA(bp) = bp->firmware->data +
8972 be32_to_cpu(fw_hdr->usem_pram_data.offset);
8973 INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
8974 be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
8975 INIT_XSEM_PRAM_DATA(bp) = bp->firmware->data +
8976 be32_to_cpu(fw_hdr->xsem_pram_data.offset);
8977 INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
8978 be32_to_cpu(fw_hdr->csem_int_table_data.offset);
8979 INIT_CSEM_PRAM_DATA(bp) = bp->firmware->data +
8980 be32_to_cpu(fw_hdr->csem_pram_data.offset);
8982 BNX2X_ALLOC_AND_SET(iro_arr, iro_alloc_err, bnx2x_prep_iro);
8987 kfree(bp->init_ops_offsets);
8988 init_offsets_alloc_err:
8989 kfree(bp->init_ops);
8991 kfree(bp->init_data);
8992 request_firmware_exit:
8993 release_firmware(bp->firmware);
8998 static inline int bnx2x_set_qm_cid_count(struct bnx2x *bp, int l2_cid_count)
9000 int cid_count = L2_FP_COUNT(l2_cid_count);
9003 cid_count += CNIC_CID_MAX;
9005 return roundup(cid_count, QM_CID_ROUND);
9008 static int __devinit bnx2x_init_one(struct pci_dev *pdev,
9009 const struct pci_device_id *ent)
9011 struct net_device *dev = NULL;
9013 int pcie_width, pcie_speed;
9016 switch (ent->driver_data) {
9020 cid_count = FP_SB_MAX_E1x;
9025 cid_count = FP_SB_MAX_E2;
9029 pr_err("Unknown board_type (%ld), aborting\n",
9034 cid_count += CNIC_CONTEXT_USE;
9036 /* dev zeroed in init_etherdev */
9037 dev = alloc_etherdev_mq(sizeof(*bp), cid_count);
9039 dev_err(&pdev->dev, "Cannot allocate net device\n");
9043 bp = netdev_priv(dev);
9044 bp->msg_enable = debug;
9046 pci_set_drvdata(pdev, dev);
9048 bp->l2_cid_count = cid_count;
9050 rc = bnx2x_init_dev(pdev, dev);
9056 rc = bnx2x_init_bp(bp);
9060 /* calc qm_cid_count */
9061 bp->qm_cid_count = bnx2x_set_qm_cid_count(bp, cid_count);
9063 rc = register_netdev(dev);
9065 dev_err(&pdev->dev, "Cannot register net device\n");
9069 /* Configure interupt mode: try to enable MSI-X/MSI if
9070 * needed, set bp->num_queues appropriately.
9072 bnx2x_set_int_mode(bp);
9074 /* Add all NAPI objects */
9075 bnx2x_add_all_napi(bp);
9077 bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
9079 netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx,"
9080 " IRQ %d, ", board_info[ent->driver_data].name,
9081 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
9083 ((!CHIP_IS_E2(bp) && pcie_speed == 2) ||
9084 (CHIP_IS_E2(bp) && pcie_speed == 1)) ?
9085 "5GHz (Gen2)" : "2.5GHz",
9086 dev->base_addr, bp->pdev->irq);
9087 pr_cont("node addr %pM\n", dev->dev_addr);
9093 iounmap(bp->regview);
9096 iounmap(bp->doorbells);
9100 if (atomic_read(&pdev->enable_cnt) == 1)
9101 pci_release_regions(pdev);
9103 pci_disable_device(pdev);
9104 pci_set_drvdata(pdev, NULL);
9109 static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
9111 struct net_device *dev = pci_get_drvdata(pdev);
9115 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
9118 bp = netdev_priv(dev);
9120 unregister_netdev(dev);
9122 /* Delete all NAPI objects */
9123 bnx2x_del_all_napi(bp);
9125 /* Disable MSI/MSI-X */
9126 bnx2x_disable_msi(bp);
9128 /* Make sure RESET task is not scheduled before continuing */
9129 cancel_delayed_work_sync(&bp->reset_task);
9132 iounmap(bp->regview);
9135 iounmap(bp->doorbells);
9137 bnx2x_free_mem_bp(bp);
9141 if (atomic_read(&pdev->enable_cnt) == 1)
9142 pci_release_regions(pdev);
9144 pci_disable_device(pdev);
9145 pci_set_drvdata(pdev, NULL);
9148 static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
9152 bp->state = BNX2X_STATE_ERROR;
9154 bp->rx_mode = BNX2X_RX_MODE_NONE;
9156 bnx2x_netif_stop(bp, 0);
9157 netif_carrier_off(bp->dev);
9159 del_timer_sync(&bp->timer);
9160 bp->stats_state = STATS_STATE_DISABLED;
9161 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
9166 /* Free SKBs, SGEs, TPA pool and driver internals */
9167 bnx2x_free_skbs(bp);
9169 for_each_queue(bp, i)
9170 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
9174 bp->state = BNX2X_STATE_CLOSED;
9179 static void bnx2x_eeh_recover(struct bnx2x *bp)
9183 mutex_init(&bp->port.phy_mutex);
9185 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
9186 bp->link_params.shmem_base = bp->common.shmem_base;
9187 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
9189 if (!bp->common.shmem_base ||
9190 (bp->common.shmem_base < 0xA0000) ||
9191 (bp->common.shmem_base >= 0xC0000)) {
9192 BNX2X_DEV_INFO("MCP not active\n");
9193 bp->flags |= NO_MCP_FLAG;
9197 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
9198 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
9199 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
9200 BNX2X_ERR("BAD MCP validity signature\n");
9202 if (!BP_NOMCP(bp)) {
9204 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
9205 DRV_MSG_SEQ_NUMBER_MASK);
9206 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
9211 * bnx2x_io_error_detected - called when PCI error is detected
9212 * @pdev: Pointer to PCI device
9213 * @state: The current pci connection state
9215 * This function is called after a PCI bus error affecting
9216 * this device has been detected.
9218 static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
9219 pci_channel_state_t state)
9221 struct net_device *dev = pci_get_drvdata(pdev);
9222 struct bnx2x *bp = netdev_priv(dev);
9226 netif_device_detach(dev);
9228 if (state == pci_channel_io_perm_failure) {
9230 return PCI_ERS_RESULT_DISCONNECT;
9233 if (netif_running(dev))
9234 bnx2x_eeh_nic_unload(bp);
9236 pci_disable_device(pdev);
9240 /* Request a slot reset */
9241 return PCI_ERS_RESULT_NEED_RESET;
9245 * bnx2x_io_slot_reset - called after the PCI bus has been reset
9246 * @pdev: Pointer to PCI device
9248 * Restart the card from scratch, as if from a cold-boot.
9250 static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
9252 struct net_device *dev = pci_get_drvdata(pdev);
9253 struct bnx2x *bp = netdev_priv(dev);
9257 if (pci_enable_device(pdev)) {
9259 "Cannot re-enable PCI device after reset\n");
9261 return PCI_ERS_RESULT_DISCONNECT;
9264 pci_set_master(pdev);
9265 pci_restore_state(pdev);
9267 if (netif_running(dev))
9268 bnx2x_set_power_state(bp, PCI_D0);
9272 return PCI_ERS_RESULT_RECOVERED;
9276 * bnx2x_io_resume - called when traffic can start flowing again
9277 * @pdev: Pointer to PCI device
9279 * This callback is called when the error recovery driver tells us that
9280 * its OK to resume normal operation.
9282 static void bnx2x_io_resume(struct pci_dev *pdev)
9284 struct net_device *dev = pci_get_drvdata(pdev);
9285 struct bnx2x *bp = netdev_priv(dev);
9287 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
9288 printk(KERN_ERR "Handling parity error recovery. "
9289 "Try again later\n");
9295 bnx2x_eeh_recover(bp);
9297 if (netif_running(dev))
9298 bnx2x_nic_load(bp, LOAD_NORMAL);
9300 netif_device_attach(dev);
9305 static struct pci_error_handlers bnx2x_err_handler = {
9306 .error_detected = bnx2x_io_error_detected,
9307 .slot_reset = bnx2x_io_slot_reset,
9308 .resume = bnx2x_io_resume,
9311 static struct pci_driver bnx2x_pci_driver = {
9312 .name = DRV_MODULE_NAME,
9313 .id_table = bnx2x_pci_tbl,
9314 .probe = bnx2x_init_one,
9315 .remove = __devexit_p(bnx2x_remove_one),
9316 .suspend = bnx2x_suspend,
9317 .resume = bnx2x_resume,
9318 .err_handler = &bnx2x_err_handler,
9321 static int __init bnx2x_init(void)
9325 pr_info("%s", version);
9327 bnx2x_wq = create_singlethread_workqueue("bnx2x");
9328 if (bnx2x_wq == NULL) {
9329 pr_err("Cannot create workqueue\n");
9333 ret = pci_register_driver(&bnx2x_pci_driver);
9335 pr_err("Cannot register driver\n");
9336 destroy_workqueue(bnx2x_wq);
9341 static void __exit bnx2x_cleanup(void)
9343 pci_unregister_driver(&bnx2x_pci_driver);
9345 destroy_workqueue(bnx2x_wq);
9348 module_init(bnx2x_init);
9349 module_exit(bnx2x_cleanup);
9353 /* count denotes the number of new completions we have seen */
9354 static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
9356 struct eth_spe *spe;
9358 #ifdef BNX2X_STOP_ON_ERROR
9359 if (unlikely(bp->panic))
9363 spin_lock_bh(&bp->spq_lock);
9364 BUG_ON(bp->cnic_spq_pending < count);
9365 bp->cnic_spq_pending -= count;
9368 for (; bp->cnic_kwq_pending; bp->cnic_kwq_pending--) {
9369 u16 type = (le16_to_cpu(bp->cnic_kwq_cons->hdr.type)
9370 & SPE_HDR_CONN_TYPE) >>
9371 SPE_HDR_CONN_TYPE_SHIFT;
9373 /* Set validation for iSCSI L2 client before sending SETUP
9376 if (type == ETH_CONNECTION_TYPE) {
9377 u8 cmd = (le32_to_cpu(bp->cnic_kwq_cons->
9378 hdr.conn_and_cmd_data) >>
9379 SPE_HDR_CMD_ID_SHIFT) & 0xff;
9381 if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP)
9382 bnx2x_set_ctx_validation(&bp->context.
9383 vcxt[BNX2X_ISCSI_ETH_CID].eth,
9384 HW_CID(bp, BNX2X_ISCSI_ETH_CID));
9387 /* There may be not more than 8 L2 and COMMON SPEs and not more
9388 * than 8 L5 SPEs in the air.
9390 if ((type == NONE_CONNECTION_TYPE) ||
9391 (type == ETH_CONNECTION_TYPE)) {
9392 if (!atomic_read(&bp->spq_left))
9395 atomic_dec(&bp->spq_left);
9396 } else if (type == ISCSI_CONNECTION_TYPE) {
9397 if (bp->cnic_spq_pending >=
9398 bp->cnic_eth_dev.max_kwqe_pending)
9401 bp->cnic_spq_pending++;
9403 BNX2X_ERR("Unknown SPE type: %d\n", type);
9408 spe = bnx2x_sp_get_next(bp);
9409 *spe = *bp->cnic_kwq_cons;
9411 DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
9412 bp->cnic_spq_pending, bp->cnic_kwq_pending, count);
9414 if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
9415 bp->cnic_kwq_cons = bp->cnic_kwq;
9417 bp->cnic_kwq_cons++;
9419 bnx2x_sp_prod_update(bp);
9420 spin_unlock_bh(&bp->spq_lock);
9423 static int bnx2x_cnic_sp_queue(struct net_device *dev,
9424 struct kwqe_16 *kwqes[], u32 count)
9426 struct bnx2x *bp = netdev_priv(dev);
9429 #ifdef BNX2X_STOP_ON_ERROR
9430 if (unlikely(bp->panic))
9434 spin_lock_bh(&bp->spq_lock);
9436 for (i = 0; i < count; i++) {
9437 struct eth_spe *spe = (struct eth_spe *)kwqes[i];
9439 if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
9442 *bp->cnic_kwq_prod = *spe;
9444 bp->cnic_kwq_pending++;
9446 DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
9447 spe->hdr.conn_and_cmd_data, spe->hdr.type,
9448 spe->data.update_data_addr.hi,
9449 spe->data.update_data_addr.lo,
9450 bp->cnic_kwq_pending);
9452 if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
9453 bp->cnic_kwq_prod = bp->cnic_kwq;
9455 bp->cnic_kwq_prod++;
9458 spin_unlock_bh(&bp->spq_lock);
9460 if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
9461 bnx2x_cnic_sp_post(bp, 0);
9466 static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
9468 struct cnic_ops *c_ops;
9471 mutex_lock(&bp->cnic_mutex);
9472 c_ops = bp->cnic_ops;
9474 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
9475 mutex_unlock(&bp->cnic_mutex);
9480 static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
9482 struct cnic_ops *c_ops;
9486 c_ops = rcu_dereference(bp->cnic_ops);
9488 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
9495 * for commands that have no data
9497 int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
9499 struct cnic_ctl_info ctl = {0};
9503 return bnx2x_cnic_ctl_send(bp, &ctl);
9506 static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
9508 struct cnic_ctl_info ctl;
9510 /* first we tell CNIC and only then we count this as a completion */
9511 ctl.cmd = CNIC_CTL_COMPLETION_CMD;
9512 ctl.data.comp.cid = cid;
9514 bnx2x_cnic_ctl_send_bh(bp, &ctl);
9515 bnx2x_cnic_sp_post(bp, 0);
9518 static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
9520 struct bnx2x *bp = netdev_priv(dev);
9524 case DRV_CTL_CTXTBL_WR_CMD: {
9525 u32 index = ctl->data.io.offset;
9526 dma_addr_t addr = ctl->data.io.dma_addr;
9528 bnx2x_ilt_wr(bp, index, addr);
9532 case DRV_CTL_RET_L5_SPQ_CREDIT_CMD: {
9533 int count = ctl->data.credit.credit_count;
9535 bnx2x_cnic_sp_post(bp, count);
9539 /* rtnl_lock is held. */
9540 case DRV_CTL_START_L2_CMD: {
9541 u32 cli = ctl->data.ring.client_id;
9543 /* Set iSCSI MAC address */
9544 bnx2x_set_iscsi_eth_mac_addr(bp, 1);
9549 /* Start accepting on iSCSI L2 ring. Accept all multicasts
9550 * because it's the only way for UIO Client to accept
9551 * multicasts (in non-promiscuous mode only one Client per
9552 * function will receive multicast packets (leading in our
9555 bnx2x_rxq_set_mac_filters(bp, cli,
9556 BNX2X_ACCEPT_UNICAST |
9557 BNX2X_ACCEPT_BROADCAST |
9558 BNX2X_ACCEPT_ALL_MULTICAST);
9559 storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));
9564 /* rtnl_lock is held. */
9565 case DRV_CTL_STOP_L2_CMD: {
9566 u32 cli = ctl->data.ring.client_id;
9568 /* Stop accepting on iSCSI L2 ring */
9569 bnx2x_rxq_set_mac_filters(bp, cli, BNX2X_ACCEPT_NONE);
9570 storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));
9575 /* Unset iSCSI L2 MAC */
9576 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
9579 case DRV_CTL_RET_L2_SPQ_CREDIT_CMD: {
9580 int count = ctl->data.credit.credit_count;
9582 smp_mb__before_atomic_inc();
9583 atomic_add(count, &bp->spq_left);
9584 smp_mb__after_atomic_inc();
9589 BNX2X_ERR("unknown command %x\n", ctl->cmd);
9596 void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
9598 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
9600 if (bp->flags & USING_MSIX_FLAG) {
9601 cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
9602 cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
9603 cp->irq_arr[0].vector = bp->msix_table[1].vector;
9605 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
9606 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
9609 cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e2_sb;
9611 cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e1x_sb;
9613 cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
9614 cp->irq_arr[0].status_blk_num2 = CNIC_IGU_SB_ID(bp);
9615 cp->irq_arr[1].status_blk = bp->def_status_blk;
9616 cp->irq_arr[1].status_blk_num = DEF_SB_ID;
9617 cp->irq_arr[1].status_blk_num2 = DEF_SB_IGU_ID;
9622 static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
9625 struct bnx2x *bp = netdev_priv(dev);
9626 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
9631 if (atomic_read(&bp->intr_sem) != 0)
9634 bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
9638 bp->cnic_kwq_cons = bp->cnic_kwq;
9639 bp->cnic_kwq_prod = bp->cnic_kwq;
9640 bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;
9642 bp->cnic_spq_pending = 0;
9643 bp->cnic_kwq_pending = 0;
9645 bp->cnic_data = data;
9648 cp->drv_state = CNIC_DRV_STATE_REGD;
9649 cp->iro_arr = bp->iro_arr;
9651 bnx2x_setup_cnic_irq_info(bp);
9653 rcu_assign_pointer(bp->cnic_ops, ops);
9658 static int bnx2x_unregister_cnic(struct net_device *dev)
9660 struct bnx2x *bp = netdev_priv(dev);
9661 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
9663 mutex_lock(&bp->cnic_mutex);
9664 if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
9665 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
9666 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
9669 rcu_assign_pointer(bp->cnic_ops, NULL);
9670 mutex_unlock(&bp->cnic_mutex);
9672 kfree(bp->cnic_kwq);
9673 bp->cnic_kwq = NULL;
9678 struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
9680 struct bnx2x *bp = netdev_priv(dev);
9681 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
9683 cp->drv_owner = THIS_MODULE;
9684 cp->chip_id = CHIP_ID(bp);
9685 cp->pdev = bp->pdev;
9686 cp->io_base = bp->regview;
9687 cp->io_base2 = bp->doorbells;
9688 cp->max_kwqe_pending = 8;
9689 cp->ctx_blk_size = CDU_ILT_PAGE_SZ;
9690 cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) +
9691 bnx2x_cid_ilt_lines(bp);
9692 cp->ctx_tbl_len = CNIC_ILT_LINES;
9693 cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS;
9694 cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
9695 cp->drv_ctl = bnx2x_drv_ctl;
9696 cp->drv_register_cnic = bnx2x_register_cnic;
9697 cp->drv_unregister_cnic = bnx2x_unregister_cnic;
9698 cp->iscsi_l2_client_id = BNX2X_ISCSI_ETH_CL_ID;
9699 cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID;
9701 DP(BNX2X_MSG_SP, "page_size %d, tbl_offset %d, tbl_lines %d, "
9702 "starting cid %d\n",
9709 EXPORT_SYMBOL(bnx2x_cnic_probe);
9711 #endif /* BCM_CNIC */